author     Georgios Pinitas <georgios.pinitas@arm.com>    2020-07-06 19:10:38 +0100
committer  Georgios Pinitas <georgios.pinitas@arm.com>    2020-07-07 15:07:37 +0000
commit     0cc50ed757f06f4f076e261cb7253dd67264dec6 (patch)
tree       866306f780f7529ea172d78210355aed761ad7a4
parent     ab23dd0fbc632063235a6ad408241dc79a35d3e4 (diff)
download   ComputeLibrary-0cc50ed757f06f4f076e261cb7253dd67264dec6.tar.gz
COMPMID-3324: Remove pretransposed support from NEON backend
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I394c6c539969940e0119cbc14174909d47e65de6
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3519
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
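The pattern of the change is visible throughout the gemm_*.cpp method tables below: selection predicates stop consulting the caller-supplied transpose flags (_trA/_trB) and the _pretransposed_hint, leaving only the problem-shape checks, because the NEON backend now always works from a pretransposed B. A minimal stand-alone sketch of that before/after (MiniArgs is a hypothetical stand-in for arm_gemm::GemmArgs, not code from this patch):

#include <iostream>

struct MiniArgs {
    unsigned int Ksize;
    bool trA;                 // operand-transpose flag, consulted before this patch
    bool pretransposed_hint;  // caller hint, consulted before this patch
};

int main() {
    // Predicate shape before the patch (see the removed lines in gemm_bf16.cpp below):
    auto before = [](const MiniArgs &a) { return a.Ksize >= 8 && !a.trA && a.pretransposed_hint; };
    // Predicate shape after the patch: only the problem size matters.
    auto after  = [](const MiniArgs &a) { return a.Ksize >= 8; };

    MiniArgs args{16, false, false};                          // caller gave no pretranspose hint
    std::cout << before(args) << ' ' << after(args) << '\n';  // prints "0 1"
    return 0;
}

This matches GemmInterleaved below, where B_is_pretransposed() now returns true unconditionally and B_pretranspose_required() only checks whether the packed buffer has been supplied yet.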
Diffstat
-rw-r--r--  Android.bp | 8
-rw-r--r--  src/core/NEON/kernels/arm_gemm/buffer_manager.hpp | 331
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp | 18
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp | 11
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 60
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_int8.cpp | 19
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp | 281
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved_2d.hpp | 445
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp | 14
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_native.hpp | 144
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 10
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp | 19
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemv_native_transposed.hpp | 135
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/a55.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp | 22
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/a55.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4/generic.cpp | 1708
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans.hpp | 57
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans/generic.cpp | 1072
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x8.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x6.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x8.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4/generic.cpp | 36
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp | 20
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4/generic.cpp | 36
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4/generic.cpp | 18
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4.hpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4/generic.cpp | 18
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4/generic.cpp | 3290
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4/generic.cpp | 3814
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp | 2070
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4/generic.cpp | 4494
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4.hpp | 89
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4/generic.cpp | 4494
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp | 6
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp | 50
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantized.cpp | 2
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp | 16
-rw-r--r--  src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp | 16
-rw-r--r--  src/core/NEON/kernels/assembly/Helpers.cpp | 67
-rw-r--r--  src/core/NEON/kernels/assembly/Helpers.h | 122
-rw-r--r--  src/core/NEON/kernels/assembly/arm_gemm.hpp | 10
-rw-r--r--  src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp | 4
78 files changed, 380 insertions, 23407 deletions
diff --git a/Android.bp b/Android.bp
index b0cacea7b1..fe0df1d7b5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -366,7 +366,6 @@ cc_library_static {
"src/core/NEON/kernels/arm_gemm/mergeresults.cpp",
"src/core/NEON/kernels/arm_gemm/misc.cpp",
"src/core/NEON/kernels/arm_gemm/quantized.cpp",
- "src/core/NEON/kernels/assembly/Helpers.cpp",
"src/core/NEON/kernels/assembly/INEGEMMWrapperKernel.cpp",
"src/core/NEON/kernels/convolution/common/padding.cpp",
"src/core/NEON/kernels/convolution/common/qasymm8.cpp",
@@ -796,14 +795,12 @@ cc_library_static {
"src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_bf16fp32_mmla_12x8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_s8s32_mmla_12x8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_interleaved_u8u32_mmla_12x8/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_12x8/a53.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_12x8/a55.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_12x8/a55r1.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_12x8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_12x8/x1.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_pretransposed/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6/a55.cpp",
@@ -832,11 +829,6 @@ cc_library_static {
"src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_s8s32_mmla_3VLx8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_dot_3VLx8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/sve_interleaved_u8u32_mmla_3VLx8/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4/generic.cpp",
- "src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8/generic.cpp",
"src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8/generic.cpp",
diff --git a/src/core/NEON/kernels/arm_gemm/buffer_manager.hpp b/src/core/NEON/kernels/arm_gemm/buffer_manager.hpp
deleted file mode 100644
index 268b9ba6c7..0000000000
--- a/src/core/NEON/kernels/arm_gemm/buffer_manager.hpp
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#include <cstdlib>
-#include <vector>
-
-#ifndef NO_MULTI_THREADING
-#include <atomic>
-#include <mutex>
-
-#define USE_SEMAPHORE
-
-#ifdef USE_SEMAPHORE
-#include <condition_variable>
-#endif
-
-#endif
-
-namespace arm_gemm {
-
-#ifndef NO_MULTI_THREADING
-enum class BufferStatus {
- IDLE,
- POPULATING,
- BUSY
-};
-
-class Buffer {
-private:
- const int _maxusers; // Maximum permissible threads.
- void * const _storage; // Storage for buffer content.
-
- int _numusers; // Actual number of threads (might be lower).
-
- volatile BufferStatus _status = BufferStatus::IDLE; // Status
- std::atomic_int _users = { }; // How many users are still using the buffer.
- volatile int _index = 0; // Which block of data currently resides in the buffer.
-
- std::mutex _lock = { };
-#ifdef USE_SEMAPHORE
- std::condition_variable _cv = { };
-#endif
-
- template <typename T>
- void populate_buffer(T func) {
- func(_storage);
-
- /* Now mark it as ready. */
-#ifdef USE_SEMAPHORE
- {
- std::unique_lock<std::mutex> ul(_lock);
- _status = BufferStatus::BUSY;
- _cv.notify_all();
- }
-#else
- _status = BufferStatus::BUSY;
-#endif
- }
-
-public:
- Buffer(Buffer &) = delete;
- Buffer &operator= (Buffer &) = delete;
-
- Buffer(void *storage, int maxusers) : _maxusers(maxusers), _storage(storage), _numusers(maxusers) {
- _status = BufferStatus::IDLE;
- }
-
- /* Try and populate the given index.
- * Wait if the buffer is busy with previous index, then:
- *
- * If the buffer is idle, grab it and populate it.
- * If it's already being populated by another thread or is ready, return.
- */
- template <typename T>
- void try_populate(const int index, T func) {
- for (;;) {
-#ifdef USE_SEMAPHORE
- /* If it's busy with a previous index, wait on the semaphore. */
- if ((_status == BufferStatus::BUSY) && (_index != index)) {
- std::unique_lock<std::mutex> ul(_lock);
-
- if ((_status == BufferStatus::BUSY) && (_index != index)) {
- _cv.wait(ul);
- }
- }
-#endif
- /* Return if another thread is populating it already. */
- if ((_index == index) &&
- ((_status == BufferStatus::POPULATING) || (_status == BufferStatus::BUSY))) {
- return;
- }
-
- if (_status == BufferStatus::IDLE) {
- std::lock_guard<std::mutex> guard(_lock);
-
- /* If the buffer is still idle, we can grab it and populate it. */
- if (_status == BufferStatus::IDLE) {
- _status = BufferStatus::POPULATING;
- _index = index;
- _users = _numusers;
- break;
- }
- }
- }
-
- /* If we get here, fill in the buffer. */
- populate_buffer(func);
- }
-
- template <typename T>
- void *get(const int index, T func) {
- // Loop until we achieve something.
- for (;;) {
- // If the index is correct and the buffer status is busy then we can
- // just return the content. No locking is needed here as the index
- // cannot change (and status cannot change from BUSY) until all
- // users have finished.
- if ((_index == index) && (_status == BufferStatus::BUSY)) {
- return _storage;
- }
-
- /* If the buffer still has some previous content, or is being
- * populated, we can wait with the semaphore. */
-#ifdef USE_SEMAPHORE
- if (((_status == BufferStatus::BUSY) && (_index != index)) ||
- (_status == BufferStatus::POPULATING)) {
- std::unique_lock<std::mutex> ul(_lock);
-
- if (((_status == BufferStatus::BUSY) && (_index != index)) ||
- (_status == BufferStatus::POPULATING)) {
- _cv.wait(ul);
- }
- }
-#endif
-
- // If it's idle, we need to populate it. The IDLE->POPULATING
- // transition requires the lock.
- if (_status == BufferStatus::IDLE) {
- std::lock_guard<std::mutex> guard(_lock);
-
- /* If it's still idle, grab it. Otherwise drop through and
- * we'll do something else next time through the loop. */
- if (_status == BufferStatus::IDLE) {
- _status = BufferStatus::POPULATING;
- _index = index;
- _users = _numusers;
- break;
- }
- }
- }
-
- /* If we get here we need to populate the buffer. */
- populate_buffer(func);
-
- return _storage;
- }
-
- /* Threads call this when they have finished processing a buffer. We
- * simply (atomically) decrement the user count, and if it's hit zero we
- * flag the buffer as idle.
- */
- void release(void) {
- if (--_users == 0) {
-#ifdef USE_SEMAPHORE
- std::unique_lock<std::mutex> ul(_lock);
- _status = BufferStatus::IDLE;
- /* We notify all waiters as we expect one to do the populating
- * and any others to go and process and earlier block. */
- _cv.notify_all();
-#else
- _status = BufferStatus::IDLE;
-#endif
- }
- }
-
- /* This is called to change the number of users. */
- void set_numusers(int numusers) {
- _numusers = std::min(numusers, _maxusers);
- }
-};
-
-
-class BufferManager {
-private:
- /* This has to be a vector of Buffer *, because a Buffer cannot be moved
- * or copied due to atomic members. */
- std::vector<Buffer *> _buffers = { };
- const int _maxthreads;
- void * const _storage;
-
-public:
- BufferManager(BufferManager &) = delete;
- BufferManager & operator=(BufferManager &) = delete;
-
- // Say how much storage is needed.
- static inline size_t get_storage_requirement(const int maxthreads, const size_t buffersize) {
- return buffersize * ((maxthreads == 1) ? 1 : 3);
- }
-
- BufferManager(const int maxthreads, const size_t buffersize, void *storage) : _maxthreads(maxthreads), _storage(storage) {
- const int numbuffers = (maxthreads == 1) ? 1 : 3;
-
- /* We don't need any Buffer objects in single thread mode. */
- if (_maxthreads == 1) {
- return;
- }
-
- /* Use intptr_t to avoid performing arithmetic on a void * */
- intptr_t storage_int = reinterpret_cast<intptr_t>(_storage);
-
- for (int i=0; i<numbuffers; i++) {
- _buffers.push_back(new Buffer(reinterpret_cast<void *>(storage_int), _maxthreads));
- storage_int += buffersize;
- }
- }
-
- ~BufferManager() {
- while (_buffers.size()) {
- delete _buffers.back();
- _buffers.pop_back();
- }
- }
-
- template <typename T>
- void *get(const int index, T func) {
- /* In single thread mode, we just directly call the populating
- * function on the (single) buffer, otherwise forward to the
- * relevant Buffer. */
- if (_maxthreads==1) {
- func(_storage);
- return _storage;
- } else {
- return _buffers[index % _buffers.size()]->get(index, func);
- }
- }
-
- template <typename T>
- void try_populate(const int index, T func) {
- /* No need for this in single thread mode. */
- if (_maxthreads==1) {
- return;
- }
-
- _buffers[index % _buffers.size()]->try_populate(index, func);
- }
-
- void release(const int index) {
- /* No need for this in single thread mode. */
- if (_maxthreads==1) {
- return;
- }
-
- _buffers[index % _buffers.size()]->release();
- }
-
- void set_nthreads(int threads) {
- if (_maxthreads==1) {
- return;
- }
-
- for(unsigned int i=0; i<_buffers.size(); i++) {
- _buffers[i]->set_numusers(threads);
- }
- }
-};
-
-#else
-
-/* Trivial implementation if threading is disabled at compile time.
- *
- * Here, we only need storage for a single buffer. The 'get' method needs
- * to call the supplied function to populate the buffer and then return it.
- * All the other methods do nothing.
- */
-
-class BufferManager {
-private:
- void * const _storage;
-
-public:
- BufferManager(BufferManager &) = delete;
- BufferManager & operator=(BufferManager &) = delete;
-
- BufferManager(const int, const size_t, void *storage) : _storage(storage) { }
-
- ~BufferManager() { }
-
- // Say how much storage is needed.
- static inline size_t get_storage_requirement(const int, const size_t buffersize) {
- return buffersize;
- }
-
- template <typename T>
- void try_populate(const int, T) { }
-
- void release(const int) { }
-
- template <typename T>
- void *get(const int, T func) {
- func(_storage);
- return _storage;
- }
-
- void set_nthreads(int) { }
-};
-
-#endif
-
-} // namespace arm_gemm
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
index fad0e84bbb..f3b66528a4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
@@ -27,9 +27,7 @@
#include "gemm_hybrid.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
-#include "gemm_native.hpp"
#include "gemv_batched.hpp"
-#include "gemv_native_transposed.hpp"
#include "gemv_pretransposed.hpp"
#include "kernels/a64_interleaved_bf16fp32_dot_12x8.hpp"
@@ -38,7 +36,6 @@
#include "kernels/a32_sgemm_8x6.hpp"
#include "kernels/sve_interleaved_bf16fp32_dot_3VLx8.hpp"
#include "kernels/sve_interleaved_bf16fp32_mmla_3VLx8.hpp"
-#include "kernels/sve_native_bf16fp32_dot_4VLx4.hpp"
#include "kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp"
#include "kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp"
#include "kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp"
@@ -53,38 +50,31 @@ static const GemmImplementation<bfloat16, float> gemm_bf16_methods[] =
{
GemmMethod::GEMM_HYBRID,
"hybrid_bf16fp32_mmla_6VLx2",
- [](const GemmArgs &args) { return (args._Ksize>=8 && !args._trA && args._pretransposed_hint); },
+ [](const GemmArgs &args) { return (args._Ksize>=8); },
[](const GemmArgs &args) { return ((args._Msize <= 4) && (args._Nsize <= hybrid_bf16fp32_mmla_6VLx2::out_width())); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_bf16fp32_mmla_6VLx2, bfloat16, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_bf16fp32_mmla_8VLx2",
- [](const GemmArgs &args) { return (args._Ksize>=8 && !args._trA && args._pretransposed_hint); },
+ [](const GemmArgs &args) { return (args._Ksize>=8); },
[](const GemmArgs &args) { return (args._Msize <= 4); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_bf16fp32_mmla_8VLx2, bfloat16, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_bf16fp32_mmla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>=8 && !args._trA && args._pretransposed_hint); },
+ [](const GemmArgs &args) { return (args._Ksize>=8); },
[](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_bf16fp32_mmla_4VLx4, bfloat16, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_bf16fp32_dot_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>=8 && !args._trA && args._pretransposed_hint); },
+ [](const GemmArgs &args) { return (args._Ksize>=8); },
[](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_bf16fp32_dot_4VLx4, bfloat16, float>(args); }
},
-{ // gemm_bf16_native
- GemmMethod::GEMM_NATIVE,
- "native_bf16fp32_dot_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>=8 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
- [](const GemmArgs &args) { return new GemmNative<native_bf16fp32_dot_4VLx4, bfloat16, float>(args); }
-},
{ // gemm_bf16_interleaved
GemmMethod::GEMM_INTERLEAVED,
"interleaved_bf16fp32_mmla_3VLx8",
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 6867a5f4b9..5f2840b243 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -31,14 +31,12 @@
#include "gemm_hybrid.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
-#include "gemm_native.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
#include "kernels/a64_hgemm_24x8.hpp"
#include "kernels/a64_sgemm_12x8.hpp"
#include "kernels/sve_hybrid_fp16_mla_4VLx4.hpp"
#include "kernels/sve_interleaved_fp16_mla_3VLx8.hpp"
-#include "kernels/sve_native_fp16_mla_4VLx4.hpp"
namespace arm_gemm {
@@ -47,18 +45,11 @@ static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
{
GemmMethod::GEMM_HYBRID,
"hybrid_fp16_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize >= 8) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize >= 8); },
[](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_fp16_mla_4VLx4, __fp16, __fp16>(args); }
},
{
- GemmMethod::GEMM_NATIVE,
- "native_fp16_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize >= 8 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmNative<native_fp16_mla_4VLx4, __fp16, __fp16>(args); }
-},
-{
GemmMethod::GEMM_INTERLEAVED,
"interleaved_fp16_mla_3VLx8",
[](const GemmArgs &args) { return (args._Ksize > 4); },
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 1d5b97b41a..aa206e3f42 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -26,28 +26,22 @@
#include "gemm_hybrid.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
-#include "gemm_interleaved_2d.hpp"
#include "gemm_interleaved_pretransposed_2d.hpp"
-#include "gemm_native.hpp"
#include "gemv_batched.hpp"
-#include "gemv_native_transposed.hpp"
#include "gemv_pretransposed.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
#include "kernels/a64_hybrid_fp32_mla_16x4.hpp"
#include "kernels/a64_hybrid_fp32_mla_4x8.hpp"
-#include "kernels/a64_native_fp32_mla_16x4.hpp"
#include "kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp"
#include "kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp"
#include "kernels/a64_sgemm_12x8.hpp"
#include "kernels/a64_sgemv_pretransposed.hpp"
-#include "kernels/a64_sgemv_trans.hpp"
#include "kernels/sve_hybrid_fp32_mla_4VLx4.hpp"
#include "kernels/sve_hybrid_fp32_mmla_4VLx4.hpp"
#include "kernels/sve_interleaved_fp32_mla_3VLx8.hpp"
#include "kernels/sve_interleaved_fp32_mmla_3VLx8.hpp"
-#include "kernels/sve_native_fp32_mla_4VLx4.hpp"
#include "kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp"
namespace arm_gemm {
@@ -65,23 +59,15 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
{
GemmMethod::GEMV_PRETRANSPOSED,
"sgemv_pretransposed",
- [](const GemmArgs &args) { return (args._Msize==1 && args._pretransposed_hint && args._nbatches==1); },
+ [](const GemmArgs &args) { return (args._Msize==1 && args._nbatches==1); },
nullptr,
[](const GemmArgs &args) { return new GemvPretransposed<sgemv_pretransposed, float, float>(args); }
},
-{
- GemmMethod::GEMV_NATIVE_TRANSPOSED,
- "sgemv_trans",
- [](const GemmArgs &args) { return (args._Msize==1 && !args._trA && !args._trB && args._nbatches==1); },
- nullptr,
- [](const GemmArgs &args) { return new GemvNativeTransposed<sgemv_trans, float, float>(args); }
-},
-
#if defined(__ARM_FEATURE_SVE) && defined(MMLA_FP32)
{
GemmMethod::GEMM_HYBRID,
"hybrid_fp32_mmla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize >= 4); },
[](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mmla_4VLx4, float, float>(args); }
},
@@ -95,66 +81,52 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
#endif // __ARM_FEATURE_SVE && MMLA_FP32
#ifdef __ARM_FEATURE_SVE
-// SVE smallk / native / hybrid methods
+// SVE smallk / hybrid methods
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_fp32_mla_1VLx8",
- [](const GemmArgs &args) { return (args._Ksize <= 24) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize <= 24); },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_1VLx8, float, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_fp32_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize >= 4); },
[](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_4VLx4, float, float>(args); }
},
-{
- GemmMethod::GEMM_NATIVE,
- "native_fp32_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>4 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmNative<native_fp32_mla_4VLx4, float, float>(args); }
-},
#endif // __ARM_FEATURE_SVE
-// NEON native / hybrid methods
+// NEON hybrid methods
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_fp32_mla_4x8",
- [](const GemmArgs &args) { return (args._Ksize <= 8) && (args._Nsize % 4)==0 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize <= 8) && (args._Nsize % 4)==0; },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_4x8, float, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_fp32_mla_4x6",
- [](const GemmArgs &args) { return (args._Ksize > 8) && (args._Ksize <= 16) && (args._Nsize % 4)==0 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize > 8) && (args._Ksize <= 16) && (args._Nsize % 4)==0; },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_4x6, float, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_fp32_mla_4x8_normal",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize >= 4); },
[](const GemmArgs &args) { return (args._Nsize < 12); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_4x8, float, float>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_fp32_mla_16x4",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return (args._Ksize >= 4); },
[](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || (args._Msize < 16) || (args._nmulti > 1); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_16x4, float, float>(args); }
},
-{
- GemmMethod::GEMM_NATIVE,
- "native_fp32_mla_16x4",
- [](const GemmArgs &args) { return (args._Ksize>4 && (args._Nsize % 16)==0 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmNative<native_fp32_mla_16x4, float, float>(args); }
-},
#ifdef __ARM_FEATURE_SVE
{
@@ -168,18 +140,10 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
// Pretranposed, 2D split
{
GemmMethod::GEMM_INTERLEAVED_2D,
- "sgemm_12x8_pretranspose_2d",
- [](const GemmArgs &args) { return args._pretransposed_hint; },
- [](const GemmArgs &args) { return args._maxthreads >= 8; },
- [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<sgemm_12x8, float, float>(args); }
-},
-// Non-pretransposed, 2D split (no buffer manager)
-{
- GemmMethod::GEMM_INTERLEAVED_2D,
"sgemm_12x8_2d",
nullptr,
- [](const GemmArgs &args) { return (!args._pretransposed_hint) && (args._maxthreads >= 8); },
- [](const GemmArgs &args) { return new GemmInterleaved2d<sgemm_12x8, float, float>(args); }
+ [](const GemmArgs &args) { return args._maxthreads >= 8; },
+ [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<sgemm_12x8, float, float>(args); }
},
// 1D split (with pretransposed or not)
{
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
index 2c666b63c2..353d681fe2 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid.hpp
@@ -57,8 +57,6 @@ class GemmHybrid : public GemmCommon<To, Tr> {
const unsigned int _nbatches;
const unsigned int _nmulti;
- const bool _trB;
-
const Activation _act;
/* Blocking info */
@@ -72,8 +70,8 @@ class GemmHybrid : public GemmCommon<To, Tr> {
const NDRange<4> _window_range;
static unsigned int compute_k_block(const GemmArgs &args) {
- // Some kernels don't support append mode - these can't do K blocking at all.
- if (!strategy::supports_append()) {
+ // Some kernels don't support accumulate mode - these can't do K blocking at all.
+ if (!strategy::supports_accumulate()) {
return args._Ksize;
}
@@ -135,7 +133,7 @@ public:
/* Constructor */
GemmHybrid(const GemmArgs &args)
: _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
- _nbatches(args._nbatches), _nmulti(args._nmulti), _trB(args._trB),
+ _nbatches(args._nbatches), _nmulti(args._nmulti),
_act(args._act),
_k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
_Mround(roundup(args._Msize, strategy::out_height())),
@@ -243,7 +241,7 @@ public:
const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;
strat.transforms.PrepareB( buffer, B + (multi * B_multi_stride), ldb,
- x0, xmax, k0, kmax, _trB);
+ x0, xmax, k0, kmax);
buffer += size;
}
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
index 36545c16ba..915227fc29 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_quantized.hpp
@@ -56,8 +56,6 @@ class GemmHybridQuantized : public GemmCommon<To, Tr> {
const unsigned int _nbatches;
const unsigned int _nmulti;
- const bool _trB;
-
/* Blocking info */
const unsigned int _k_block;
const unsigned int _n_block;
@@ -142,7 +140,7 @@ public:
/* Constructor */
GemmHybridQuantized(const GemmArgs &args, const Requantize32 &qp)
: _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
- _nbatches(args._nbatches), _nmulti(args._nmulti), _trB(args._trB),
+ _nbatches(args._nbatches), _nmulti(args._nmulti),
_k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
_Mround(roundup(args._Msize, strategy::out_height())),
_window_range(iceildiv(args._Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmulti),
@@ -279,7 +277,7 @@ public:
const unsigned int size = roundup(xmax-x0, strategy::out_width()) * k_size;
strat.transforms.PrepareB( buffer, B + (multi * B_multi_stride), ldb,
- x0, xmax, k0, kmax, _trB);
+ x0, xmax, k0, kmax);
buffer += size;
}
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index 8dd0df5603..d1d137e090 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -28,7 +28,6 @@
#include "gemm_hybrid.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
-#include "gemm_native.hpp"
#include "kernels/a64_gemm_s16_12x8.hpp"
#include "kernels/a64_gemm_s8_12x8.hpp"
@@ -40,7 +39,6 @@
#include "kernels/sve_hybrid_s8s32_dot_4VLx4.hpp"
#include "kernels/sve_interleaved_s8s32_dot_3VLx8.hpp"
#include "kernels/sve_interleaved_s8s32_mmla_3VLx8.hpp"
-#include "kernels/sve_native_s8s32_dot_4VLx4.hpp"
#include "kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp"
namespace arm_gemm {
@@ -59,25 +57,18 @@ static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_s8s32_dot_1VLx8",
- [](const GemmArgs &args) { return args._Ksize<=64 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._Ksize<=64; },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_s8s32_dot_1VLx8, int8_t, int32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_s8s32_dot_4VLx4",
- [](const GemmArgs &args) { return args._Ksize>=16 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._Ksize>=16; },
[](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_s8s32_dot_4VLx4, int8_t, int32_t>(args); }
},
{
- GemmMethod::GEMM_NATIVE,
- "native_s8s32_dot_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>=16 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
- [](const GemmArgs &args) { return new GemmNative<native_s8s32_dot_4VLx4, int8_t, int32_t>(args); }
-},
-{
GemmMethod::GEMM_INTERLEAVED,
"interleaved_s8s32_dot_3VLx8",
[](const GemmArgs &args) { return (args._Ksize>4); },
@@ -97,21 +88,21 @@ static const GemmImplementation<int8_t, int32_t> gemm_s8_methods[] = {
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_s8s32_dot_4x8",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32); },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_s8s32_dot_4x8, int8_t, int32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_s8s32_dot_4x6",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64); },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_s8s32_dot_4x6, int8_t, int32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_s8s32_dot_16x4",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && args._Ksize>=16; },
[](const GemmArgs &args) { return args._Nsize<=256 && args._Ksize>128; },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_s8s32_dot_16x4, int8_t, int32_t>(args); }
},
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index f572f7940b..3b829491ca 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -31,7 +31,6 @@
#include "arm_gemm.hpp"
#include "utils.hpp"
-#include "buffer_manager.hpp"
#include "mergeresults.hpp"
#include "transform.hpp"
@@ -65,14 +64,10 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
const unsigned int _nbatches;
const unsigned int _nmulti;
- const bool _trA;
- const bool _trB;
-
const Activation _act;
const int _maxthreads;
int _nthreads;
- const bool _pretransposed;
/* Blocking info */
unsigned int _k_block=0;
@@ -81,7 +76,6 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
/* Working space, pretransposed buffer, buffer manager */
const Toi *_B_transposed=nullptr;
- BufferManager *_bm=nullptr;
void *_working_space=nullptr;
/* We will need to walk through the blocks of B in a few contexts, so
@@ -150,27 +144,100 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
return ROUND_UP(sizeof(Toi) * _k_block * _Mround * _nbatches);
}
- // B working size: 0, 1 or 3 of these needed depending on pretransposed and threading settings.
- size_t get_b_working_size() const {
- return ROUND_UP(sizeof(Toi) * _x_block * _k_block);
- }
-
// C working size: One needed per thread.
size_t get_c_working_size() const {
return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height());
}
- // Internal execute function.
- // This supports both the "pretransposed" and "standard" interfaces via the template parameter.
- template<bool pretransposed>
- void execute_internal(unsigned int start, unsigned int end, int threadid) {
+
+public:
+ GemmInterleaved(GemmInterleaved &) = delete;
+ GemmInterleaved & operator= (GemmInterleaved &) = delete;
+
+ /* Constructor */
+ GemmInterleaved(const GemmArgs &args)
+ : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
+ _nbatches(args._nbatches), _nmulti(args._nmulti),
+ _act(args._act), _maxthreads(args._maxthreads), _nthreads(args._maxthreads) {
+ const unsigned int L1_size = _ci->get_L1_cache_size();
+ const unsigned int L2_size = _ci->get_L2_cache_size();
+
+ assert(_maxthreads > 0);
+
+ // Work out blocking parameters, or override from provided GemmConfig
+ if (args._cfg && args._cfg->inner_block_size) {
+ _k_block = args._cfg->inner_block_size;
+ } else {
+ // k_block: Find out how much of the larger array can be loaded into half the cache.
+ // This should account for associative caches.
+ _k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
+
+ // Needs to be (at least a single) multiple of the K unroll level.
+ _k_block /= strategy::k_unroll();
+ _k_block = std::max(_k_block, 1U) * strategy::k_unroll();
+
+ // Now tune to presented problem size; this is how many blocks we need.
+ unsigned int num_k_blocks = iceildiv(_Ksize, _k_block);
+
+ // So divide the space equally into that many blocks.
+ _k_block = iceildiv(_Ksize, num_k_blocks);
+
+ // And round UP to the K unroll level required.
+ _k_block = iceildiv(_k_block, strategy::k_unroll());
+ _k_block *= strategy::k_unroll();
+ }
+
+ if (args._cfg && args._cfg->outer_block_size) {
+ _x_block = args._cfg->outer_block_size;
+ } else {
+ // x_block: Work out how many rows (of length k_block) will fit in the L2
+ // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
+ _x_block = (((L2_size * 9) / 10) - (_k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) /
+ (sizeof(Toi) * _k_block);
+
+ // Needs to be (at least a single) multiple of the kernel output width.
+ _x_block /= strategy::out_width();
+ _x_block = std::max(_x_block, 1U) * strategy::out_width();
+
+ // And tune to the presented problem size.
+ unsigned int num_x_blocks = iceildiv(_Nsize, _x_block);
+ _x_block = iceildiv(_Nsize, num_x_blocks);
+
+ _x_block = iceildiv(_x_block, strategy::out_width());
+ _x_block *= strategy::out_width();
+ }
+
+ // Work out the rounded size of M - needed for some buffers.
+ _Mround = iceildiv(_Msize, strategy::out_height());
+ _Mround *= strategy::out_height();
+ }
+
+ // Interface implementation - Compulsory functions
+
+ // Window size: Only the last thread should do a ragged block, so dole
+ // out work in units of out_height. Factor batches into the window, but
+ // not multi for now (as this would cause problems with the buffer
+ // manager).
+ ndrange_t get_window_size() const override {
+ // _Mround is a multiple of out_height by definition.
+ return { (_Mround / strategy::out_height()) * _nbatches };
+ }
+
+ // set_nthreads: pass on to buffer manager to avoid it waiting for non-existant threads.
+ void set_nthreads(int nthreads) override {
+ _nthreads = std::min(nthreads, _maxthreads);
+ }
+
+ // Execute
+ void execute(const ndcoord_t &work_range, const ndcoord_t &, int threadid) override {
+ const auto start = work_range.get_position(0);
+ const auto end = work_range.get_position_end(0);
#ifdef CYCLE_PROFILING
profiler prof;
#endif
strategy strat(_ci);
blockwalker current(*this);
- blockwalker next=current;
/* Translate 'start' and 'end' into a position within the batches and rows. */
const unsigned int window_per_batch = _Mround / strategy::out_height();
@@ -182,12 +249,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
unsigned int m_max = (end - (batch_end * window_per_batch)) * strategy::out_height();
/* Make sure we've been set up correctly. */
- if (pretransposed) {
- assert(_B_transposed);
- } else {
- assert(_bm);
- }
-
+ assert(_B_transposed);
assert(_working_space);
int8_t *working_space_bytes = reinterpret_cast<int8_t *>(_working_space);
@@ -198,12 +260,8 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size()));
Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size()));
- // Shared buffers - these come either from BufferManager or _B_transposed.
const Toi *b_panel;
-
- if (pretransposed) {
- b_panel = _B_transposed;
- }
+ b_panel = _B_transposed;
//printf("Starting GEMM loop, x_block=%d, k_block=%d\n", _x_block, _k_block);
@@ -224,7 +282,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
strat.transforms.PrepareA(a_panel + ((batch * _Mround + first_m) * _k_block),
this->_Aptr + (batch * this->_A_batch_stride) + (current.multi() * this->_A_multi_stride),
- this->_lda, first_m, last_m, current.k0(), current.kmax(), _trA);
+ this->_lda, first_m, last_m, current.k0(), current.kmax());
}
// Figure out how many "K" the kernel will actually process.
@@ -234,41 +292,6 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
int bblocks = iceildiv(current.xmax() - current.x0(), strategy::out_width());
- if (!pretransposed) {
- /* Look ahead to the next block and populate it if necessary.
- * This avoids the populate operation becoming a bottleneck, and
- * helps keep the threads synchronized (the first thread to get
- * here will populate while the rest will advance).
- *
- * If we are running single threaded, bm->try_populate() will do
- * nothing.
- */
- if (next.advance()) {
- _bm->try_populate(next.index(), [&](void *buffer) {
-#ifdef CYCLE_PROFILING
- auto p=prof.ScopedProfiler(PROFILE_PREPB, (next.xmax()-next.x0()) * (next.kmax()-next.k0()) * sizeof(Toi));
-#endif
-
- Toi *b_panel = reinterpret_cast<Toi *>(buffer);
-
- strat.transforms.PrepareB(b_panel, this->_Bptr + (next.multi() * this->_B_multi_stride), this->_ldb,
- next.x0(), next.xmax(), next.k0(), next.kmax(), _trB);
- });
- }
-
- /* Get the buffer for this iteration from the BufferManager. */
- b_panel = reinterpret_cast<Toi *>(_bm->get(current.index(), [&](void *bpv) {
-#ifdef CYCLE_PROFILING
- auto p=prof.ScopedProfiler(PROFILE_PREPB, (current.xmax()-current.x0()) * (current.kmax()-current.k0()) * sizeof(Toi));
-#endif
-
- Toi *b_panel = reinterpret_cast<Toi *>(bpv);
-
- strat.transforms.PrepareB(b_panel, this->_Bptr + (current.multi() * this->_B_multi_stride), this->_ldb,
- current.x0(), current.xmax(), current.k0(), current.kmax(), _trB);
- }));
- }
-
/* Do the actual work. */
for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
unsigned int first_m = (batch == batch_0) ? m_0 : 0;
@@ -308,105 +331,7 @@ class GemmInterleaved : public GemmCommon<To, Tr> {
}
}
- if (pretransposed) {
- b_panel += (bblocks * strat.out_width() * kern_k);
- } else {
- _bm->release(current.index());
- }
- }
- }
-
-public:
- GemmInterleaved(GemmInterleaved &) = delete;
- GemmInterleaved & operator= (GemmInterleaved &) = delete;
-
- /* Constructor */
- GemmInterleaved(const GemmArgs &args)
- : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
- _nbatches(args._nbatches), _nmulti(args._nmulti), _trA(args._trA), _trB(args._trB),
- _act(args._act), _maxthreads(args._maxthreads), _nthreads(args._maxthreads),
- _pretransposed(args._pretransposed_hint) {
- const unsigned int L1_size = _ci->get_L1_cache_size();
- const unsigned int L2_size = _ci->get_L2_cache_size();
-
- assert(_maxthreads > 0);
-
- // Work out blocking parameters, or override from provided GemmConfig
- if (args._cfg && args._cfg->inner_block_size) {
- _k_block = args._cfg->inner_block_size;
- } else {
- // k_block: Find out how much of the larger array can be loaded into half the cache.
- // This should account for associative caches.
- _k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
-
- // Needs to be (at least a single) multiple of the K unroll level.
- _k_block /= strategy::k_unroll();
- _k_block = std::max(_k_block, 1U) * strategy::k_unroll();
-
- // Now tune to presented problem size; this is how many blocks we need.
- unsigned int num_k_blocks = iceildiv(_Ksize, _k_block);
-
- // So divide the space equally into that many blocks.
- _k_block = iceildiv(_Ksize, num_k_blocks);
-
- // And round UP to the K unroll level required.
- _k_block = iceildiv(_k_block, strategy::k_unroll());
- _k_block *= strategy::k_unroll();
- }
-
- if (args._cfg && args._cfg->outer_block_size) {
- _x_block = args._cfg->outer_block_size;
- } else {
- // x_block: Work out how many rows (of length k_block) will fit in the L2
- // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
- _x_block = (((L2_size * 9) / 10) - (_k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) /
- (sizeof(Toi) * _k_block);
-
- // Needs to be (at least a single) multiple of the kernel output width.
- _x_block /= strategy::out_width();
- _x_block = std::max(_x_block, 1U) * strategy::out_width();
-
- // And tune to the presented problem size.
- unsigned int num_x_blocks = iceildiv(_Nsize, _x_block);
- _x_block = iceildiv(_Nsize, num_x_blocks);
-
- _x_block = iceildiv(_x_block, strategy::out_width());
- _x_block *= strategy::out_width();
- }
-
- // Work out the rounded size of M - needed for some buffers.
- _Mround = iceildiv(_Msize, strategy::out_height());
- _Mround *= strategy::out_height();
- }
-
- // Interface implementation - Compulsory functions
-
- // Window size: Only the last thread should do a ragged block, so dole
- // out work in units of out_height. Factor batches into the window, but
- // not multi for now (as this would cause problems with the buffer
- // manager).
- ndrange_t get_window_size() const override {
- // _Mround is a multiple of out_height by definition.
- return { (_Mround / strategy::out_height()) * _nbatches };
- }
-
- // set_nthreads: pass on to buffer manager to avoid it waiting for non-existant threads.
- void set_nthreads(int nthreads) override {
- _nthreads = std::min(nthreads, _maxthreads);
- if (_bm) {
- _bm->set_nthreads(_nthreads);
- }
- }
-
- // Execute
- void execute(const ndcoord_t &work_range, const ndcoord_t &, int threadid) override {
- const auto start = work_range.get_position(0);
- const auto end = work_range.get_position_end(0);
-
- if (_pretransposed) {
- execute_internal<true>(start, end, threadid);
- } else {
- execute_internal<false>(start, end, threadid);
+ b_panel += (bblocks * strat.out_width() * kern_k);
}
}
@@ -415,12 +340,6 @@ public:
// In all cases, we need one A buffer plus a C buffer per thread.
size_t size = get_a_working_size() + (get_c_working_size() * _maxthreads);
- // For pretransposed case, there is no working space needed for B.
- // Otherwise, we need a BufferManager.
- if (!_pretransposed) {
- size += BufferManager::get_storage_requirement(_maxthreads, get_b_working_size());
- }
-
size += 64; // Add on a cache line extra for alignment.
return size;
@@ -439,29 +358,17 @@ public:
working_space_bytes += diff;
- if (_pretransposed) {
- // Pretransposed case: just set internal pointer to parameter value.
- _working_space = reinterpret_cast<void *>(working_space_bytes);
- } else {
- // Otherwise, use the first part of the working space for the buffer manager.
- // It's legal to call this again so don't leak a buffer manager if it already existed.
- delete _bm;
-
- _bm = new BufferManager(_nthreads, get_b_working_size(), reinterpret_cast<void *>(working_space_bytes));
-
- working_space_bytes += BufferManager::get_storage_requirement(_maxthreads, get_b_working_size());
-
- _working_space = reinterpret_cast<void *>(working_space_bytes);
- }
+ // Pretransposed case: just set internal pointer to parameter value.
+ _working_space = reinterpret_cast<void *>(working_space_bytes);
}
// Interface implementation - pretransposed
bool B_is_pretransposed() const override {
- return _pretransposed;
+ return true;
}
bool B_pretranspose_required() const override {
- return _pretransposed && (_B_transposed==nullptr);
+ return (_B_transposed==nullptr);
}
// TODO: this could almost certainly be considerably simpler.
@@ -506,7 +413,7 @@ public:
k_size *= strategy::k_unroll();
strat.transforms.PrepareB(buffer, B + (current.multi() * B_multi_stride), ldb,
- current.x0(), current.xmax(), current.k0(), current.kmax(), _trB);
+ current.x0(), current.xmax(), current.k0(), current.kmax());
buffer += (x_size * k_size);
} while (current.advance());
@@ -515,10 +422,6 @@ public:
void set_pretransposed_B_data(void *in_buffer) override {
_B_transposed = reinterpret_cast<Toi *>(in_buffer);
}
-
- ~GemmInterleaved() override {
- delete _bm;
- }
};
} // namespace arm_gemm
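The constructor moved up in the hunks above carries the cache-blocking arithmetic across unchanged: _k_block targets half the L1 and _x_block roughly 90% of the L2, both rounded to the kernel geometry and then re-balanced against the problem size. A stand-alone sketch of that arithmetic under assumed numbers (32 KB L1, 512 KB L2, a 12-wide by 8-high fp32 kernel with a K unroll of 1, and a 1024 x 1000 K-by-N problem; none of these values come from the patch):

#include <algorithm>
#include <cstdio>

// Assumed example parameters only -- not values from the patch.
static unsigned int iceildiv(unsigned int a, unsigned int b) { return (a + b - 1) / b; }

int main() {
    const unsigned int L1_size = 32 * 1024, L2_size = 512 * 1024;
    const unsigned int out_width = 12, out_height = 8, k_unroll = 1;
    const unsigned int elem_size = sizeof(float);      // sizeof(Toi) in the real code
    const unsigned int Ksize = 1024, Nsize = 1000;

    // k_block: fit the larger kernel dimension into half the L1, round to the
    // K unroll level, then split K evenly into that many blocks.
    unsigned int k_block = (L1_size / 2) / (elem_size * std::max(out_width, out_height));
    k_block = std::max(k_block / k_unroll, 1U) * k_unroll;
    k_block = iceildiv(Ksize, iceildiv(Ksize, k_block));
    k_block = iceildiv(k_block, k_unroll) * k_unroll;

    // x_block: 90% of the L2 minus the L1-resident panels, expressed in rows of
    // length k_block, rounded to the kernel output width and re-balanced over N.
    unsigned int x_block = (((L2_size * 9) / 10) - (k_block * elem_size * (out_width + out_height)))
                           / (elem_size * k_block);
    x_block = std::max(x_block / out_width, 1U) * out_width;
    x_block = iceildiv(Nsize, iceildiv(Nsize, x_block));
    x_block = iceildiv(x_block, out_width) * out_width;

    std::printf("k_block=%u x_block=%u\n", k_block, x_block);   // 256 and 336 here
    return 0;
}

With those assumptions the sketch prints k_block=256 x_block=336; the real code takes out_width()/out_height()/k_unroll() from the strategy type and the cache sizes from CPUInfo.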
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_2d.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved_2d.hpp
deleted file mode 100644
index 376d19cc65..0000000000
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_2d.hpp
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright (c) 2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#include "arm_gemm.hpp"
-#include "utils.hpp"
-
-#include "mergeresults.hpp"
-#include "transform.hpp"
-
-#ifdef CYCLE_PROFILING
-#include "profiler.hpp"
-#endif
-
-#include <algorithm>
-#include <cassert>
-
-// Some macros used to decide how much working space to allocate.
-// Round allocations up to the next cache line.
-#define ALLOC_ROUND 64
-#define ROUND_UP(x) ((((x) + ALLOC_ROUND-1) / ALLOC_ROUND) * ALLOC_ROUND)
-
-// Implementation of the GemmCommon abstract class.
-//
-// This implementation interleaves the source matrices in blocks - good for
-// larger matrices.
-namespace arm_gemm {
-
-template<typename strategy, typename To, typename Tr>
-class GemmInterleaved2d : public GemmCommon<To, Tr> {
- typedef typename strategy::operand_type Toi;
- typedef typename strategy::result_type Tri;
-
- /* const properties set by constructor */
- const CPUInfo * const _ci;
-
- const unsigned int _Msize;
- const unsigned int _Nsize;
- const unsigned int _Ksize;
-
- const unsigned int _nbatches;
- const unsigned int _nmulti;
-
- const bool _trA;
- const bool _trB;
-
- const Activation _act;
-
- const int _maxthreads;
- int _nthreads;
-
- /* Blocking info */
- unsigned int _k_block=0;
- unsigned int _x_block=0;
-
- unsigned int _Mround_div=0;
- unsigned int _Mround=0;
- unsigned int _Nround_div=0;
- unsigned int _Nround=0;
-
- /* Working space, pretransposed buffer */
- void *_working_space=nullptr;
-
- /* We will need to walk through the blocks of B in a few contexts, so
- * factor that out. */
- class blockwalker {
- private:
- /* Size loops, etc. based on our parent's configuration */
- const GemmInterleaved2d<strategy, To, Tr> &_parent;
-
- /* K, X and multi parameters for current iteration. */
- unsigned int _k0=0, _x0=0, _xmin=0, _xmax=0, _multi=0;
-
- unsigned int _index=0;
- bool _done=false;
- bool _newkblock=true;
- bool _newmulti=true;
-
- public:
- blockwalker(const GemmInterleaved2d<strategy, To, Tr> &parent)
- : _parent(parent)
- , _xmax { parent._Nsize }
- { }
-
- blockwalker(const GemmInterleaved2d<strategy, To, Tr> &parent, unsigned int x0, unsigned int xmax)
- : _parent(parent)
- , _x0 { x0 }
- , _xmin { x0 }
- , _xmax { xmax }
- {
- assert(_x0 <= _xmax);
- }
-
- unsigned int xmax() {
- return std::min(_x0 + _parent._x_block, _xmax);
- }
-
- unsigned int kmax() {
- return std::min(_k0 + _parent._k_block, _parent._Ksize);
- }
-
- /* Advance to the next block, return false at the end. */
- bool advance(void) {
- if (_done) {
- return false;
- }
-
- _newkblock=false;
- _x0 += _parent._x_block;
- if (_x0 >= _xmax) {
- _x0=_xmin;
- _k0 += _parent._k_block;
- if (_k0 >= _parent._Ksize) {
- _k0=0;
- _multi++;
- if (_multi >= _parent._nmulti) {
- _done=true;
- return false;
- }
- _newmulti=true;
- }
- _newkblock=true;
- }
- _index++;
-
- return true;
- }
-
- unsigned int k0(void) { return _k0; }
- unsigned int x0(void) { return _x0; }
- unsigned int multi(void) { return _multi; }
- unsigned int index(void) { return _index; }
- bool done(void) { return _done; }
- bool newkblock(void) { return _newkblock; }
- };
-
- // A working size: One of these needed, regardless of thread count. Divided according to window.
- size_t get_a_working_size() const {
- return ROUND_UP(sizeof(Toi) * _k_block * _Mround * _nbatches) * 2;
- }
-
- // B working size: 0, 1 or 3 of these needed depending on pretransposed and threading settings.
- size_t get_b_working_size() const {
- return ROUND_UP(sizeof(Toi) * _x_block * _k_block);
- }
-
- // C working size: One needed per thread.
- size_t get_c_working_size() const {
- return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height());
- }
-
- void execute_transpose(unsigned int m_start, unsigned int m_end, unsigned int n_start, unsigned int n_end, int threadid, int, int nthreadid) {
- strategy strat(_ci);
-
- /* Translate 'start' and 'end' into a position within the batches and rows. */
- const unsigned int window_per_batch = _Mround / strategy::out_height();
- unsigned int batch_0 = m_start / window_per_batch;
- unsigned int batch_end = m_end / window_per_batch;
-
- /* Compute the M values to operate on */
- unsigned int m_0 = (m_start - (batch_0 * window_per_batch)) * strategy::out_height();
- unsigned int m_max = (m_end - (batch_end * window_per_batch)) * strategy::out_height();
-
- unsigned int n_0 = std::min(this->_Nsize, strategy::out_width() * n_start);
- unsigned int n_max = std::min(this->_Nsize, strategy::out_width() * n_end);
-
- blockwalker current(*this, n_0, n_max);
-
- /* get workspace as int8_t */
- assert(_working_space);
- int8_t *working_space_bytes = reinterpret_cast<int8_t *>(_working_space);
-
- auto c_panel_start = working_space_bytes;
- auto a_panel_start = c_panel_start + get_c_working_size() * _maxthreads;
- auto b_panel_start = a_panel_start + get_a_working_size() * _maxthreads;
-
- auto c_panel = reinterpret_cast<Tri *>(c_panel_start + get_c_working_size() * threadid);
- auto a_panel = reinterpret_cast<Toi *>(a_panel_start + get_a_working_size() * nthreadid);
- auto b_panel = reinterpret_cast<Toi *>(b_panel_start + get_b_working_size() * threadid);
-
-
- // newkblock() is always true on the first iteration, so this will be set properly on the first loop.
-
- int kern_k = 0;
- for (;!current.done();current.advance()) {
- const int bblocks = iceildiv(current.xmax() - current.x0(), strategy::out_width());
-            /*
-             * The entirety of A^kblock is transposed up front and computed against individual
-             * blocks of B (xblock).
-             *
-             * Therefore, we only need to re-transpose when k_block progresses.
-             */
- if (current.newkblock()) {
- for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
- unsigned int first_m = (batch == batch_0) ? m_0 : 0;
- unsigned int last_m = (batch == batch_end) ? m_max : _Msize;
-
- if (first_m >= last_m)
- continue;
-
- auto a_thread_panel_in = this->_Aptr
- + (batch * this->_A_batch_stride)
- + (current.multi() * this->_A_multi_stride);
-
- auto a_thread_panel_out = a_panel + ((batch * _Mround + first_m) * _k_block);
-
- strat.transforms.PrepareA(
- a_thread_panel_out,
- a_thread_panel_in,
- this->_lda,
- first_m,
- last_m,
- current.k0(),
- current.kmax(),
- _trA);
- }
-
- kern_k = iceildiv(current.kmax() - current.k0(), strategy::k_unroll());
- kern_k *= strat.k_unroll();
- }
-
- auto *b_panel_in = this->_Bptr + (current.multi() * this->_B_multi_stride);
-
- strat.transforms.PrepareB(
- b_panel, //dst
- b_panel_in, //src
- this->_ldb,
- current.x0(), //idx from
- current.xmax(), //idx to
- current.k0(),
- current.kmax(),
- _trB);
-
- //Iterate over the batches
- for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
- unsigned int first_m = (batch == batch_0) ? m_0 : 0;
- unsigned int last_m = (batch == batch_end) ? m_max : _Msize;
-
- if (first_m >= last_m)
- continue;
-
- const Toi *a_ptr = a_panel + (batch * _Mround + first_m) * _k_block;
-
-
-                //Iterate over the interleaved rows of the packed A matrix
- for (unsigned int y=first_m; y<last_m; y+=strategy::out_height()) {
- unsigned int ymax = std::min(_Msize, y + strategy::out_height());
-
- strat.kernel(a_ptr, b_panel, c_panel, 1, bblocks, kern_k);
- a_ptr += (strategy::out_height() * kern_k);
-
- const bool first_pass = current.k0()==0;
- const bool last_pass = current.kmax()==_Ksize;
-
- auto c_panel_out = this->_Cptr
- + this->_C_batch_stride * batch
- + this->_C_multi_stride * current.multi();
-
- auto bias = (first_pass && this->_bias)
- ? this->_bias + (current.multi() * this->_bias_multi_stride)
- : nullptr;
-
- auto act = last_pass ? _act : Activation();
-
- strat.transforms.Merge(
- c_panel_out,
- c_panel,
- this->_ldc,
- y,
- ymax,
- current.x0(),
- current.xmax(),
- bias,
- act,
- !first_pass); //Append
- }
- }
- }
- }
-public:
- GemmInterleaved2d(GemmInterleaved2d &) = delete;
- GemmInterleaved2d & operator= (GemmInterleaved2d &) = delete;
-
-    /* Constructor */
- GemmInterleaved2d(const GemmArgs &args)
- : _ci(args._ci)
- , _Msize(args._Msize)
- , _Nsize(args._Nsize)
- , _Ksize(args._Ksize)
- , _nbatches(args._nbatches)
- , _nmulti(args._nmulti)
- , _trA(args._trA)
- , _trB(args._trB)
- , _act(args._act)
- , _maxthreads(args._maxthreads)
- , _nthreads(args._maxthreads)
-
- // Work out the rounded size of M - needed for some buffers.
- , _Mround_div ( iceildiv(_Msize, strategy::out_height()) )
- , _Mround ( _Mround_div * strategy::out_height() )
-
- , _Nround_div ( iceildiv(_Nsize, strategy::out_width()) )
- , _Nround ( _Nround_div * strategy::out_width() )
- {
- const unsigned int L1_size = _ci->get_L1_cache_size();
- const unsigned int L2_size = _ci->get_L2_cache_size();
-
- assert(_maxthreads > 0);
-
- // Work out blocking parameters, or override from provided GemmConfig
- if (args._cfg && args._cfg->inner_block_size) {
- _k_block = args._cfg->inner_block_size;
- } else {
- // k_block: Find out how much of the larger array can be loaded into half the cache.
- // This should account for associative caches.
- _k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
-
- // Needs to be (at least a single) multiple of the K unroll level.
- _k_block /= strategy::k_unroll();
- _k_block = std::max(_k_block, 1U) * strategy::k_unroll();
-
- // Now tune to presented problem size; this is how many blocks we need.
- unsigned int num_k_blocks = iceildiv(_Ksize, _k_block);
-
- // So divide the space equally into that many blocks.
- _k_block = iceildiv(_Ksize, num_k_blocks);
-
- // And round UP to the K unroll level required.
- _k_block = iceildiv(_k_block, strategy::k_unroll());
- _k_block *= strategy::k_unroll();
- }
-
- if (args._cfg && args._cfg->outer_block_size) {
- _x_block = args._cfg->outer_block_size;
- } else {
- // x_block: Work out how many rows (of length k_block) will fit in the L2
- // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
- _x_block = (((L2_size * 9) / 10) - (_k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) /
- (sizeof(Toi) * _k_block);
-
- // Needs to be (at least a single) multiple of the kernel output width.
- _x_block /= strategy::out_width();
- _x_block = std::max(_x_block, 1U) * strategy::out_width();
-
- // And tune to the presented problem size.
- unsigned int num_x_blocks = iceildiv(_Nsize, _x_block);
- _x_block = iceildiv(_Nsize, num_x_blocks);
-
- _x_block = iceildiv(_x_block, strategy::out_width());
- _x_block *= strategy::out_width();
- }
-
- }
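// A self-contained sketch of the cache-driven k_block heuristic implemented in the
// constructor above, assuming fp32 operands. pick_k_block() and iceildiv_example()
// are illustrative stand-ins, not arm_gemm functions.
#include <algorithm>

static unsigned int iceildiv_example(unsigned int a, unsigned int b) {
    return (a + b - 1) / b;
}

static unsigned int pick_k_block(unsigned int L1_size, unsigned int Ksize,
                                 unsigned int out_width, unsigned int out_height,
                                 unsigned int k_unroll, unsigned int elem_size) {
    // Let the larger interleaved panel fill roughly half of L1.
    unsigned int k_block = (L1_size / 2) / (elem_size * std::max(out_width, out_height));

    // Round to a non-zero multiple of the K unroll level.
    k_block = std::max(k_block / k_unroll, 1U) * k_unroll;

    // Balance across the number of K blocks the problem actually needs...
    unsigned int num_k_blocks = iceildiv_example(Ksize, k_block);
    k_block = iceildiv_example(Ksize, num_k_blocks);

    // ...and round back up to the unroll level.
    return iceildiv_example(k_block, k_unroll) * k_unroll;
}

// Example: a 32KB L1 with a 16x4 fp32 kernel (k_unroll() == 1) gives an initial
// k_block of (32768/2) / (4*16) = 256; for K == 1000 that is rebalanced over
// iceildiv(1000, 256) = 4 blocks to a final k_block of iceildiv(1000, 4) = 250.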
-
- // Interface implementation - Compulsory functions
- ndrange_t get_window_size() const override {
- unsigned m = (_Mround / strategy::out_height()) * _nbatches;
- unsigned n = _Nround_div;
-
- return { m, n };
- }
-
-    // set_nthreads: clamp to the configured maximum to avoid waiting for non-existent threads.
- void set_nthreads(int nthreads) override {
- _nthreads = std::min(nthreads, _maxthreads);
- }
-
- void execute(const ndcoord_t& work_range, const ndcoord_t& thread_locator, int threadid) override {
-        /*
-         * This particular GEMM implementation can only be broken up over the M & N
-         * dimensions; we inform the framework of this limitation via the get_window_size function.
-         */
- const auto m_start = work_range.get_position(0);
- const auto n_start = work_range.get_position(1);
- const auto m_size = work_range.get_size(0);
- const auto n_size = work_range.get_size(1);
- const auto m_end = m_start + m_size;
- const auto n_end = n_start + n_size;
-
- const auto m_threadid = thread_locator.get_position(0);
- const auto n_threadid = thread_locator.get_position(1);
-
- execute_transpose(m_start, m_end, n_start, n_end, threadid, m_threadid, n_threadid);
- }
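    // A worked example of how the 2D window above maps back onto batches and rows in
    // execute_transpose(); the numbers are illustrative only. With _Msize = 100,
    // strategy::out_height() = 4 and _nbatches = 2, get_window_size() reports
    // m = (100/4) * 2 = 50 units, i.e. window_per_batch = 25 units per batch.
    // A work range of m_start = 10, m_end = 30 then decomposes as:
    //   batch_0   = 10 / 25 = 0,  m_0   = (10 - 0*25) * 4 = 40
    //   batch_end = 30 / 25 = 1,  m_max = (30 - 1*25) * 4 = 20
    // so this call covers rows 40..100 of batch 0 and rows 0..20 of batch 1, exactly
    // the per-batch ranges the loops in execute_transpose() visit.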
-
-    std::size_t get_working_size() const override {
-        /*
-         * Because we do not know how the scheduler will break up
-         * the task, we need to ensure that we allocate enough
-         * space to handle both the case where every thread is
-         * parallelised across B AND the case where every thread is
-         * parallelised across A.
-         *
-         * We therefore allocate one A, B and C panel per thread.
-         */
- return get_c_working_size() * _maxthreads
- + get_a_working_size() * _maxthreads
- + get_b_working_size() * _maxthreads
- + 64; //to account for cacheline alignment
- }
-
-
- void set_working_space(void *working_space) override {
- // Make sure everything ends up cache line aligned
- int8_t *working_space_bytes = reinterpret_cast<int8_t *>(working_space);
- intptr_t working_space_int = reinterpret_cast<intptr_t>(working_space);
-
- size_t diff=0;
-
- if (working_space_int & 0x3F) {
- diff = 0x40 - (working_space_int & 0x3F);
- }
-
- working_space_bytes += diff;
-
- _working_space = reinterpret_cast<void *>(working_space_bytes);
- }
-
- ~GemmInterleaved2d() override { }
-};
-
-} // namespace arm_gemm
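// A stand-alone illustration of the cache-line alignment performed by
// set_working_space() in the deleted class above, with a worked example: a base
// pointer at 0x1008 is pushed forward by 0x40 - 0x08 = 0x38 bytes to 0x1040.
// align_to_cacheline() is an illustrative name, not an arm_gemm function.
#include <cstdint>

static void *align_to_cacheline(void *ptr) {
    std::intptr_t p = reinterpret_cast<std::intptr_t>(ptr);
    if (p & 0x3F) {
        p += 0x40 - (p & 0x3F);
    }
    return reinterpret_cast<void *>(p);
}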
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
index 38fb26370c..97e16a61c1 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved_pretransposed_2d.hpp
@@ -62,9 +62,6 @@ class GemmInterleavedPretransposed2d : public GemmCommon<To, Tr> {
const unsigned int _nbatches;
const unsigned int _nmulti;
- const bool _trA;
- const bool _trB;
-
const Activation _act;
const int _maxthreads;
@@ -252,8 +249,7 @@ class GemmInterleavedPretransposed2d : public GemmCommon<To, Tr> {
first_m,
last_m,
current.k0(),
- current.kmax(),
- _trA);
+ current.kmax());
}
}
@@ -317,8 +313,6 @@ public:
, _Ksize(args._Ksize)
, _nbatches(args._nbatches)
, _nmulti(args._nmulti)
- , _trA(args._trA)
- , _trB(args._trB)
, _act(args._act)
, _maxthreads(args._maxthreads)
, _nthreads(args._maxthreads)
@@ -330,8 +324,6 @@ public:
, _Nround_div ( iceildiv(_Nsize, strategy::out_width()) )
, _Nround ( _Nround_div * strategy::out_width() )
{
-
- assert(args._pretransposed_hint);
assert(_maxthreads > 0);
const unsigned int L1_size = _ci->get_L1_cache_size();
@@ -411,7 +403,7 @@ public:
execute_pretranspose(m_start, m_end, n_start, n_end, threadid, m_threadid, n_threadid);
}
- std::size_t get_working_size()const override {
+ std::size_t get_working_size() const override {
     /* Because we do not know how the scheduler will break up
      * the task, we need to ensure that we allocate enough
      * space to be able to handle the case where every thread
@@ -493,7 +485,7 @@ public:
k_size *= strategy::k_unroll();
strat.transforms.PrepareB(buffer, B + (current.multi() * B_multi_stride), ldb,
- current.x0(), current.xmax(), current.k0(), current.kmax(), _trB);
+ current.x0(), current.xmax(), current.k0(), current.kmax());
buffer += (x_size * k_size);
} while (current.advance());
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_native.hpp b/src/core/NEON/kernels/arm_gemm/gemm_native.hpp
deleted file mode 100644
index cddbd51e32..0000000000
--- a/src/core/NEON/kernels/arm_gemm/gemm_native.hpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#include <stdio.h>
-
-#include "arm_gemm.hpp"
-
-#include "ndrange.hpp"
-
-#ifdef CYCLE_PROFILING
-#include "profiler.hpp"
-#endif
-
-namespace arm_gemm {
-
-// Implementation of the GemmCommon abstract class.
-//
-// This implementation is for native GEMM with no transposition.
-//
-// By default the source data is used in-place, but if type conversion is
-// needed we need to allocate working space (CURRENTLY NOT IMPLEMENTED).
-
-template<typename strategy, typename To, typename Tr>
-class GemmNative : public GemmCommon<To, Tr> {
- typedef typename strategy::operand_type Toi;
- typedef typename strategy::result_type Tri;
-
- const unsigned int _Msize;
- const unsigned int _Nsize;
- const unsigned int _Ksize;
-
- const unsigned int _nbatches;
- const unsigned int _nmultis;
-
- const Activation _act;
-
- const CPUInfo * const _ci;
-
- const unsigned int _k_block;
- const unsigned int _n_block;
-
- const NDRange<4> _window_range;
-
- static unsigned int compute_k_block(const GemmArgs &args) {
- return args._Ksize;
- }
-
- static unsigned int compute_n_block(const GemmArgs &args) {
- if ((args._cfg != nullptr) && args._cfg->outer_block_size > 0) {
- return args._cfg->outer_block_size;
- } else {
- return args._Nsize;
- }
- }
-
-public:
- GemmNative(GemmNative &) = delete;
- GemmNative & operator= (GemmNative &) = delete;
-
- GemmNative(const GemmArgs &args)
- : _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
- _nbatches(args._nbatches), _nmultis(args._nmulti),
- _act(args._act), _ci(args._ci),
- _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
- _window_range(iceildiv(_Msize, strategy::out_height()), _nbatches, iceildiv(_Nsize, _n_block), _nmultis) { }
-
-    // Window is the amount of work per multi multiplied by the total number of multis.
- ndrange_t get_window_size() const override {
- return { _window_range.total_size() };
- }
-
- // Native GEMMs can always be dynamically scheduled (whether requested or not)
- bool supports_dynamic_scheduling() const override {
- return true;
- }
-
- // Actually execute the GEMM.
- void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
-#ifdef CYCLE_PROFILING
- profiler prof;
-#endif
- strategy strat(_ci);
-
- static_assert(std::is_same<To, Toi>::value, "gemm_native: Operand types must be the same.");
- static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");
-
- auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));
-
- if (p.done()) {
- return;
- }
-
- do {
- unsigned int y0 = p.dim(0) * strategy::out_height();
- unsigned int ymax = std::min(p.dim0_max() * strategy::out_height(), _Msize);
- unsigned int batch = p.dim(1);
- unsigned int n0 = p.dim(2) * _n_block;
- unsigned int nmax = std::min(n0 + _n_block, _Nsize);
- unsigned int multi = p.dim(3);
-
-#ifdef CYCLE_PROFILING
- auto p = prof.ScopedProfiler(PROFILE_KERNEL, (ymax-y0) * (nmax - n0) * _Ksize);
-#endif
-
- strat.kernel(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + (y0 * this->_lda), this->_lda,
- this->_Bptr + (multi * this->_B_multi_stride) + n0, this->_ldb,
- this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (y0 * this->_ldc) + n0, this->_ldc,
- (ymax-y0), (nmax-n0), _Ksize,
- (strategy::supports_bias() && this->_bias) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
- _act, false);
-
- // Add bias externally if needed
- if (!strategy::supports_bias() && this->_bias) {
- bias_adder(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (y0 * this->_ldc) + n0, this->_ldc,
- this->_bias + (multi * this->_bias_multi_stride) + n0,
- (ymax - y0), (nmax - n0));
- }
- } while (p.next_dim1());
- }
-};
-
-} // namespace arm_gemm
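// A simplified sketch of the 4D work decomposition the deleted GemmNative used: an
// NDRange over (M blocks, batches, N blocks, multis) flattened into one window. This
// stand-alone version assumes dimension 0 varies fastest, which is an illustrative
// assumption; the real iterator lives in ndrange.hpp and is not reproduced here.
struct NativeCoords {
    unsigned int m_block, batch, n_block, multi;
};

static NativeCoords decompose(unsigned int flat,
                              unsigned int m_blocks, unsigned int batches, unsigned int n_blocks) {
    NativeCoords c;
    c.m_block = flat % m_blocks;  flat /= m_blocks;
    c.batch   = flat % batches;   flat /= batches;
    c.n_block = flat % n_blocks;  flat /= n_blocks;
    c.multi   = flat;
    return c;
}
// Each work unit then covers strategy::out_height() rows starting at
// m_block * out_height() and one n_block of columns, as in GemmNative::execute().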
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index 73d0c272a6..67f28d38e2 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -42,14 +42,14 @@ static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_s8s32_dot_1VLx8",
- [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64; },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_s8s32_dot_1VLx8, int8_t, int8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_s8s32_dot_4VLx4",
- [](const GemmArgs &args, const Requantize32 &) { return args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._Ksize>=16; },
[](const GemmArgs &args, const Requantize32 &) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<hybrid_s8s32_dot_4VLx4, int8_t, int8_t>(args, qp); }
},
@@ -57,21 +57,21 @@ static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_s8s32_dot_4x8",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32); },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_s8s32_dot_4x8, int8_t, int8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_s8s32_dot_4x6",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64); },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_s8s32_dot_4x6, int8_t, int8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_s8s32_dot_16x4",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && args._Ksize>=16; },
[](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<hybrid_s8s32_dot_16x4, int8_t, int8_t>(args, qp); }
},
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index 59cd1704ff..b9e2bf6c26 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -42,14 +42,14 @@ static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_meth
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_1VLx8",
- [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64; },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_1VLx8, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_u8u32_dot_4VLx4",
- [](const GemmArgs &args, const Requantize32 &) { return args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._Ksize>=16; },
[](const GemmArgs &args, const Requantize32 &) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_4VLx4, uint8_t, uint8_t>(args, qp); }
},
@@ -57,21 +57,21 @@ static const GemmImplementation<uint8_t, uint8_t, Requantize32> gemm_quint8_meth
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_4x8",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32); },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x8, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"smallK_hybrid_u8u32_dot_4x6",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64); },
nullptr,
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<smallK_hybrid_u8u32_dot_4x6, uint8_t, uint8_t>(args, qp); }
},
{
GemmMethod::GEMM_HYBRID_QUANTIZED,
"hybrid_u8u32_dot_16x4",
- [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod() && args._Ksize>=16; },
[](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; },
[](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<hybrid_u8u32_dot_16x4, uint8_t, uint8_t>(args, qp); }
},
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index d74f335e38..d5a9e585b5 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -28,7 +28,6 @@
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
#include "gemm_hybrid.hpp"
-#include "gemm_native.hpp"
#include "kernels/a64_gemm_u16_12x8.hpp"
#include "kernels/a64_gemm_u8_12x8.hpp"
@@ -40,7 +39,6 @@
#include "kernels/sve_hybrid_u8u32_dot_4VLx4.hpp"
#include "kernels/sve_interleaved_u8u32_dot_3VLx8.hpp"
#include "kernels/sve_interleaved_u8u32_mmla_3VLx8.hpp"
-#include "kernels/sve_native_u8u32_dot_4VLx4.hpp"
#include "kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp"
namespace arm_gemm {
@@ -59,25 +57,18 @@ static const GemmImplementation<uint8_t, uint32_t> gemm_u8_methods[] = {
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_u8u32_dot_1VLx8",
- [](const GemmArgs &args) { return args._Ksize<=64 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._Ksize<=64; },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_u8u32_dot_1VLx8, uint8_t, uint32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_u8u32_dot_4VLx4",
- [](const GemmArgs &args) { return args._Ksize>=16 && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._Ksize>=16; },
[](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_u8u32_dot_4VLx4, uint8_t, uint32_t>(args); }
},
{
- GemmMethod::GEMM_NATIVE,
- "native_u8u32_dot_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>=16 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
- [](const GemmArgs &args) { return new GemmNative<native_u8u32_dot_4VLx4, uint8_t, uint32_t>(args); }
-},
-{
GemmMethod::GEMM_INTERLEAVED,
"interleaved_u8u32_dot_3VLx8",
[](const GemmArgs &args) { return (args._Ksize>4); },
@@ -97,21 +88,21 @@ static const GemmImplementation<uint8_t, uint32_t> gemm_u8_methods[] = {
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_u8u32_dot_4x8",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize<=32); },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_u8u32_dot_4x8, uint8_t, uint32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"smallK_hybrid_u8u32_dot_4x6",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64) && !args._trA && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && (args._Nsize % 4 == 0) && (args._Ksize>32) && (args._Ksize<=64); },
nullptr,
[](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_u8u32_dot_4x6, uint8_t, uint32_t>(args); }
},
{
GemmMethod::GEMM_HYBRID,
"hybrid_u8u32_dot_16x4",
- [](const GemmArgs &args) { return args._ci->has_dotprod() && args._Ksize>=16 && !args._trA && !args._trB && args._pretransposed_hint; },
+ [](const GemmArgs &args) { return args._ci->has_dotprod() && args._Ksize>=16; },
[](const GemmArgs &args) { return args._Nsize<=256 && args._Ksize>128; },
[](const GemmArgs &args) { return new GemmHybrid<hybrid_u8u32_dot_16x4, uint8_t, uint32_t>(args); }
},
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_native_transposed.hpp b/src/core/NEON/kernels/arm_gemm/gemv_native_transposed.hpp
deleted file mode 100644
index 9209d48bd9..0000000000
--- a/src/core/NEON/kernels/arm_gemm/gemv_native_transposed.hpp
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#include <stdio.h>
-
-#include "arm_gemm.hpp"
-
-#include "mergeresults.hpp"
-#include "transform.hpp"
-
-#ifdef CYCLE_PROFILING
-#include "profiler.hpp"
-#endif
-
-namespace arm_gemm {
-
-// Implementation of the GemmCommon abstract class.
-//
-// This implementation is for a "native" (no-transform) GEMV with a
-// transposed matrix.
-//
-// As a native operation the source data is used in-place, so the internal
-// and external operand/result types must match.
-template<typename strategy, typename To, typename Tr>
-class GemvNativeTransposed : public GemmCommon<To, Tr> {
- typedef typename strategy::operand_type Toi;
- typedef typename strategy::result_type Tri;
-
- const unsigned int _Nsize;
- const unsigned int _Ksize;
-
- const unsigned int _nmultis;
-
- const Activation _act;
-
- const CPUInfo * const _ci;
-
- unsigned int m_block=0;
- unsigned int n_block=0;
-
-public:
- GemvNativeTransposed(GemvNativeTransposed &) = delete;
- GemvNativeTransposed & operator= (GemvNativeTransposed &) = delete;
-
- GemvNativeTransposed(const GemmArgs &args)
- : _Nsize(args._Nsize), _Ksize(args._Ksize), _nmultis(args._nmulti), _act(args._act), _ci(args._ci) {
- /* For now don't do any blocking. TODO: figure out if we should. */
- m_block = _Ksize;
- n_block = _Nsize;
- }
-
-    // Window is the number of out_width blocks times the number of multis.
- ndrange_t get_window_size() const override {
- return { iceildiv(_Nsize, strategy::out_width()) * _nmultis };
- }
-
- // Actually execute the GEMV.
- void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
-#ifdef CYCLE_PROFILING
- profiler prof;
-#endif
- strategy strat(_ci);
-
- const auto start = work_range.get_position(0);
- const auto end = work_range.get_position_end(0);
-
- const unsigned int window_per_multi = iceildiv(_Nsize, strategy::out_width());
- const unsigned int multi_0 = start / window_per_multi;
- const unsigned int multi_end = end / window_per_multi;
-
- const unsigned int n_0 = (start - (multi_0 * window_per_multi)) * strategy::out_width();
- const unsigned int n_max = (end - (multi_end * window_per_multi)) * strategy::out_width();
-
- static_assert(std::is_same<To, Toi>::value, "gemv_transposed: Operand types must be the same.");
- static_assert(std::is_same<Tr, Tri>::value, "gemv_transposed: Result types must be the same.");
-
- for (unsigned int multi=multi_0; multi<=multi_end; multi++) {
- const unsigned int n_start = (multi==multi_0) ? n_0 : 0;
- const unsigned int n_end = (multi==multi_end) ? n_max : _Nsize;
-
- if (n_end <= n_start)
- continue;
-
- for (unsigned int m0=0; m0<_Ksize; m0+=m_block) {
- unsigned int mmax = std::min(m0 + m_block, _Ksize);
-
- for (unsigned int n0=n_start; n0<n_end; n0+=n_block) {
- unsigned int nmax = std::min(n0 + n_block, n_end);
-#ifdef CYCLE_PROFILING
- auto p = prof.ScopedProfiler(PROFILE_KERNEL, (mmax-m0) * (nmax-n0));
-#endif
- strat.kernel(this->_Bptr + (multi * this->_B_multi_stride) + (m0 * this->_ldb) + n0,
- this->_Aptr + (multi * this->_A_multi_stride) + m0,
- this->_Cptr + (multi * this->_C_multi_stride) + n0,
- static_cast<Tr>(0), this->_ldb, (mmax-m0), (nmax-n0));
-
- // Handle activation separately for now
- if (this->_bias) {
- activator<true>(this->_Cptr + (multi * this->_C_multi_stride) + n0, 0,
- this->_bias + (multi * this->_bias_multi_stride) + n0,
- _act, 1, (nmax-n0));
- } else {
- activator<false>(this->_Cptr + (multi * this->_C_multi_stride) + n0, 0,
- static_cast<const Tr *>(nullptr),
- _act, 1, (nmax-n0));
- }
- }
- }
- }
- }
-};
-
-} // namespace arm_gemm
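// A hedged sketch of what the "handle activation separately" step in the deleted
// GEMV above amounts to: an optional per-column bias add followed by a clamp. The
// Activation type and activator<>() template are arm_gemm's own; this stand-alone
// version only illustrates the arithmetic for a ReLU-style activation.
#include <algorithm>

static void bias_and_relu(float *row, const float *bias, unsigned int n,
                          bool bounded, float upper_bound) {
    for (unsigned int i = 0; i < n; i++) {
        float v = row[i] + (bias ? bias[i] : 0.0f);
        v = std::max(v, 0.0f);            // ReLU lower clamp
        if (bounded) {
            v = std::min(v, upper_bound); // BoundedReLU upper clamp
        }
        row[i] = v;
    }
}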
diff --git a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
index 945e363839..47909cdaeb 100644
--- a/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemv_pretransposed.hpp
@@ -51,8 +51,6 @@ class GemvPretransposed : public GemmCommon<To, Tr> {
const unsigned int _nmultis;
- const bool _trB;
-
const Activation _act;
const CPUInfo * const _ci;
@@ -69,7 +67,7 @@ public:
GemvPretransposed & operator= (GemvPretransposed &) = delete;
GemvPretransposed(const GemmArgs &args)
- : _Nsize(args._Nsize), _Ksize(args._Ksize), _nmultis(args._nmulti), _trB(args._trB), _act(args._act), _ci(args._ci),
+ : _Nsize(args._Nsize), _Ksize(args._Ksize), _nmultis(args._nmulti), _act(args._act), _ci(args._ci),
_buffer_per_multi(_Ksize * iceildiv(_Nsize, strategy::A_interleave()) * strategy::A_interleave()) {
/* For now don't do any blocking. TODO: figure out if we should. */
if (args._cfg && args._cfg->inner_block_size) {
@@ -169,7 +167,7 @@ public:
         /* Reverse sense here as we are dealing with B rather than A.
          * So if strategy::A_transpose is false, we still
          * transpose. */
- if (_trB ^ strategy::A_transpose()) {
+ if (strategy::A_transpose()) {
Transform<strategy::A_interleave(), strategy::A_block(), false>(A_buffer + (multi * _buffer_per_multi), B + (multi * B_multi_stride), ldb, 0, _Nsize, 0, _Ksize);
} else {
Transform<strategy::A_interleave(), strategy::A_block(), true>(A_buffer + (multi * _buffer_per_multi), B + (multi * B_multi_stride), ldb, 0, _Nsize, 0, _Ksize);
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4.hpp
index 5e5b6bd4c8..8d8ede8137 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4.hpp
@@ -60,7 +60,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
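// The rename from supports_append() to supports_accumulate() reflects how the flag is
// typically driven when K is split into blocks: the first block initialises the output
// from the bias, later blocks accumulate onto the partial results already in C (the
// kernels below branch on it before loading the bias). A minimal driver sketch, with a
// hypothetical kernel functor standing in for the real a64_hybrid_* entry points:
#include <algorithm>
#include <functional>

static void run_k_blocked(const std::function<void(unsigned int, unsigned int, bool)> &kernel,
                          unsigned int K, unsigned int k_block) {
    for (unsigned int k0 = 0; k0 < K; k0 += k_block) {
        unsigned int kmax = std::min(k0 + k_block, K);
        // Only the first K block starts from the bias; every later block accumulates.
        kernel(k0, kmax, /*accumulate=*/k0 != 0);
    }
}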
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/a55.cpp
index 1b828ee503..94fcd1064e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/a55.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -40,7 +40,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
K -= (regs_count + 1) * 4;
const long blocks_count = K / 1;
float nullbias[16];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (16 * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -89,7 +89,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
float result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(float);
float *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -108,7 +108,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
"temploadreg1 .req X1\n"
"temploadreg2 .req X2\n"
"temploadreg3 .req X3\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -481,7 +481,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -495,7 +495,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
"temploadreg3 .req X5\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -993,7 +993,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -1011,7 +1011,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -1634,7 +1634,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1657,7 +1657,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -2406,7 +2406,7 @@ void a64_hybrid_fp32_mla_16x4_a55(const float *A, int lda, const float *B, float
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/generic.cpp
index 43ff3a98dc..016bef4b9d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -40,7 +40,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
K -= (regs_count + 1) * 4;
const long blocks_count = K / 1;
float nullbias[16];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (16 * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -89,7 +89,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
float result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(float);
float *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -104,7 +104,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
switch(rows_to_compute) {
case 1:
__asm __volatile (
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -334,7 +334,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
"str q19, [%[c_ptr0], #0x30]\n"
"add %[c_ptr0], %[c_ptr0], #0x40\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
);
break;
@@ -344,7 +344,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
"c_ptr1 .req X1\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -693,7 +693,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
);
break;
@@ -707,7 +707,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -1175,7 +1175,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1194,7 +1194,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -1781,7 +1781,7 @@ void a64_hybrid_fp32_mla_16x4(const float *A, int lda, const float *B, float *C,
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp
index f4fba227d6..3f1df76a6a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_16x4/x1.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -40,7 +40,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
K -= (regs_count + 1) * 4;
const long blocks_count = K / 1;
float nullbias[16];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (16 * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -89,7 +89,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
float result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(float);
float *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -104,7 +104,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
switch(rows_to_compute) {
case 1:
__asm __volatile (
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -336,7 +336,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
"str q19, [%[c_ptr0], #0x30]\n"
"add %[c_ptr0], %[c_ptr0], #0x40\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
);
break;
@@ -346,7 +346,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
"c_ptr1 .req X1\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -697,7 +697,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
);
break;
@@ -711,7 +711,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -1181,7 +1181,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1200,7 +1200,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ldr q16, [%[biasptr]]\n"
"ldr q17, [%[biasptr], #0x10]\n"
"ldr q18, [%[biasptr], #0x20]\n"
@@ -1789,7 +1789,7 @@ void a64_hybrid_fp32_mla_16x4_x1(const float *A, int lda, const float *B, float
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp
index d11a945d27..b60401b70d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8.hpp
@@ -58,7 +58,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp
index 731230364d..7442d258ec 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_4x8/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -40,7 +40,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
K -= (regs_count + 1) * 4;
const long blocks_count = K / 1;
float nullbias[4];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (4 * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -89,7 +89,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
float result_buffer[32];
const unsigned long ldcb = (use_result_buffer ? 4 : ldc) * sizeof(float);
float *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 8); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 4 + cx] = c_ptr_real[cy * ldc + cx];
@@ -179,7 +179,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
"str q24, [%[c_ptr0]]\n"
"add %[c_ptr0], %[c_ptr0], #0x10\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
);
break;
@@ -302,7 +302,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
);
break;
@@ -467,7 +467,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -674,7 +674,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -922,7 +922,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr3\n"
".unreq c_ptr4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1211,7 +1211,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr4\n"
".unreq c_ptr5\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
@@ -1541,7 +1541,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr5\n"
".unreq c_ptr6\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "cc", "memory"
);
break;
@@ -1913,7 +1913,7 @@ void a64_hybrid_fp32_mla_4x8(const float *A, int lda, const float *B, float *C,
".unreq c_ptr6\n"
".unreq c_ptr7\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
index 4a9f7985b7..a23101a7ce 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4.hpp
@@ -59,7 +59,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
index 6c7e89559c..4a7cdc59a7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/a55.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool append) {
+void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -70,7 +70,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
int32_t result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(int32_t);
int32_t *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -88,7 +88,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg1 .req X1\n"
"temploadreg2 .req X2\n"
"temploadreg3 .req X3\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -469,7 +469,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -483,7 +483,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"temploadreg3 .req X5\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -988,7 +988,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -1006,7 +1006,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1636,7 +1636,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1659,7 +1659,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -2413,7 +2413,7 @@ void a64_hybrid_s8s32_dot_16x4_a55(const int8_t *A, int lda, const int8_t *B, in
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
index 797ab74498..da39a32690 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_16x4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool append) {
+void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -70,7 +70,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
int32_t result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(int32_t);
int32_t *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -84,7 +84,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
switch(rows_to_compute) {
case 1:
__asm __volatile (
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -322,7 +322,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"str q19, [%[c_ptr0], #0x30]\n"
"add %[c_ptr0], %[c_ptr0], #0x40\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
);
break;
@@ -332,7 +332,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"c_ptr1 .req X1\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -688,7 +688,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
);
break;
@@ -702,7 +702,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1176,7 +1176,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1195,7 +1195,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1787,7 +1787,7 @@ void a64_hybrid_s8s32_dot_16x4(const int8_t *A, int lda, const int8_t *B, int32_
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4.hpp
index cdeb5e8b36..e5a88b4519 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4.hpp
@@ -59,7 +59,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/a55.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/a55.cpp
index 91870e2e54..735e5fd45a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/a55.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/a55.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool append) {
+void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -70,7 +70,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
uint32_t result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(uint32_t);
uint32_t *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -88,7 +88,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
"temploadreg1 .req X1\n"
"temploadreg2 .req X2\n"
"temploadreg3 .req X3\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -469,7 +469,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -483,7 +483,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
"temploadreg3 .req X5\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -988,7 +988,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -1006,7 +1006,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1636,7 +1636,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1659,7 +1659,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -2413,7 +2413,7 @@ void a64_hybrid_u8u32_dot_16x4_a55(const uint8_t *A, int lda, const uint8_t *B,
".unreq temploadreg2\n"
".unreq temploadreg3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/generic.cpp
index 0436547af0..2e86233a06 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_16x4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool append) {
+void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -70,7 +70,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
uint32_t result_buffer[64];
const unsigned long ldcb = (use_result_buffer ? 16 : ldc) * sizeof(uint32_t);
uint32_t *c_ptr_real = c_ptr0;
- if (use_result_buffer && append) {
+ if (use_result_buffer && accumulate) {
for(int cy=0; cy<std::min(M-y, 4); cy++) {
for(unsigned int cx=0; cx<width; cx++) {
result_buffer[cy * 16 + cx] = c_ptr_real[cy * ldc + cx];
@@ -84,7 +84,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
switch(rows_to_compute) {
case 1:
__asm __volatile (
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -322,7 +322,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
"str q19, [%[c_ptr0], #0x30]\n"
"add %[c_ptr0], %[c_ptr0], #0x40\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
);
break;
@@ -332,7 +332,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
"c_ptr1 .req X1\n"
"add a_ptr1, %[a_ptr0], %[lda]\n"
"add c_ptr1, %[c_ptr0], %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -688,7 +688,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
);
break;
@@ -702,7 +702,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
"add c_ptr1, %[c_ptr0], %[ldc]\n"
"add a_ptr2, a_ptr1, %[lda]\n"
"add c_ptr2, c_ptr1, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1176,7 +1176,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1195,7 +1195,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
"add c_ptr2, c_ptr1, %[ldc]\n"
"add a_ptr3, a_ptr2, %[lda]\n"
"add c_ptr3, c_ptr2, %[ldc]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"movi v16.4s, #0\n"
"ldr q0, [%[a_ptr0]]\n"
"movi v17.4s, #0\n"
@@ -1787,7 +1787,7 @@ void a64_hybrid_u8u32_dot_16x4(const uint8_t *A, int lda, const uint8_t *B, uint
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb)
: "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4.hpp
deleted file mode 100644
index d7bf43deca..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __aarch64__
-
-
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void a64_native_fp32_mla_16x4(const float *, int, const float *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
-class native_fp32_mla_16x4
-{
-public:
- typedef float operand_type;
- typedef float result_type;
-
- typedef void (*kern_type)(const float *, int, const float *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return 16;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 1;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return true;
- }
-
- static constexpr bool supports_activation()
- {
- return true;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=a64_native_fp32_mla_16x4;
-
- native_fp32_mla_16x4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __aarch64__
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4/generic.cpp
deleted file mode 100644
index 82e7333ee3..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_native_fp32_mla_16x4/generic.cpp
+++ /dev/null
@@ -1,1708 +0,0 @@
-/*
- * Copyright (c) 2018-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __aarch64__
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void a64_native_fp32_mla_16x4(const float *A, int lda, const float *B, int ldb, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
- const long loops_count = ((K + 4) / 8) - 1;
- K -= loops_count * 8;
- const long regs_count = (K / 4) - 1;
- K -= (regs_count + 1) * 4;
- const long blocks_count = K / 1;
- float nullbias[16];
- if (!append && !bias) {
- memset(nullbias, 0, (16 * sizeof(float)));
- }
- float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
- float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
- const float * const minptr = &minval;
- const float * const maxptr = &maxval;
-
- switch(act.type)
- {
- default:
- case Activation::Type::None:
- break;
- case Activation::Type::BoundedReLU:
- maxval = static_cast<float>(act.param1);
- /* fall through */
- case Activation::Type::ReLU:
- minval = 0.0f;
- break;
- }
-
- for (int y=0; y<M; y+=4) {
- const float * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(float);
-
- float *c_ptr0 = C + (y * ldc);
-
- for (int x0=0; x0<N; x0+=16ul) {
- const long width = std::min((unsigned long)N-x0, 16ul);
- long loops = loops_count;
- long regs = regs_count;
- long blocks = blocks_count;
- const float *a_ptr0 = a_ptr0_base;
- const float *b_ptr0 = B + x0;
- long ldbb = ldb * sizeof(float);
- const unsigned long ldcb = ldc * sizeof(float);
- const float *biasptr = bias ? bias+x0 : nullbias;
-
- switch(M-y) {
- case 1:
- __asm __volatile (
- "ldr q16, [%[biasptr]]\n"
- "ldr q17, [%[biasptr], #0x10]\n"
- "ldr q18, [%[biasptr], #0x20]\n"
- "ldr q19, [%[biasptr], #0x30]\n"
- "ldr q0, [%[a_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ldr q8, [%[b_ptr0]]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "ldr q0, [%[a_ptr0], #-0x10]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "cbz %[regs], 3f\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "b 4f\n"
- "3:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr s0, [%[a_ptr0]]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x4\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "ld1r {v14.4s}, [%[minptr]]\n"
- "ld1r {v15.4s}, [%[maxptr]]\n"
- "fmax v16.4s, v16.4s, v14.4s\n"
- "fmax v17.4s, v17.4s, v14.4s\n"
- "fmax v18.4s, v18.4s, v14.4s\n"
- "fmax v19.4s, v19.4s, v14.4s\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
- "fmin v17.4s, v17.4s, v15.4s\n"
- "fmin v18.4s, v18.4s, v15.4s\n"
- "fmin v19.4s, v19.4s, v15.4s\n"
- "str q16, [%[c_ptr0]]\n"
- "str q17, [%[c_ptr0], #0x10]\n"
- "str q18, [%[c_ptr0], #0x20]\n"
- "str q19, [%[c_ptr0], #0x30]\n"
- "add %[c_ptr0], %[c_ptr0], #0x40\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [ldb] "r" (ldbb)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "ldr q16, [%[biasptr]]\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "ldr q17, [%[biasptr], #0x10]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov v20.16b, v16.16b\n"
- "ldr q18, [%[biasptr], #0x20]\n"
- "ldr q19, [%[biasptr], #0x30]\n"
- "mov v21.16b, v17.16b\n"
- "ldr q0, [%[a_ptr0]]\n"
- "ldr q1, [a_ptr1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov v22.16b, v18.16b\n"
- "ldr q8, [%[b_ptr0]]\n"
- "mov v23.16b, v19.16b\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "ldr q0, [%[a_ptr0], #-0x10]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "ldr q1, [a_ptr1, #-0x10]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "prfm PSTL1KEEP, [c_ptr1]\n"
- "cbz %[regs], 3f\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "b 4f\n"
- "3:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr s0, [%[a_ptr0]]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x4\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr s1, [a_ptr1]\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add a_ptr1, a_ptr1, #0x4\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "ld1r {v14.4s}, [%[minptr]]\n"
- "ld1r {v15.4s}, [%[maxptr]]\n"
- "fmax v16.4s, v16.4s, v14.4s\n"
- "fmax v17.4s, v17.4s, v14.4s\n"
- "fmax v18.4s, v18.4s, v14.4s\n"
- "fmax v19.4s, v19.4s, v14.4s\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
- "fmin v17.4s, v17.4s, v15.4s\n"
- "fmin v18.4s, v18.4s, v15.4s\n"
- "fmin v19.4s, v19.4s, v15.4s\n"
- "str q16, [%[c_ptr0]]\n"
- "fmax v20.4s, v20.4s, v14.4s\n"
- "fmax v21.4s, v21.4s, v14.4s\n"
- "fmax v22.4s, v22.4s, v14.4s\n"
- "str q17, [%[c_ptr0], #0x10]\n"
- "fmax v23.4s, v23.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v15.4s\n"
- "fmin v21.4s, v21.4s, v15.4s\n"
- "str q18, [%[c_ptr0], #0x20]\n"
- "fmin v22.4s, v22.4s, v15.4s\n"
- "fmin v23.4s, v23.4s, v15.4s\n"
- "str q19, [%[c_ptr0], #0x30]\n"
- "add %[c_ptr0], %[c_ptr0], #0x40\n"
- "str q20, [c_ptr1]\n"
- "str q21, [c_ptr1, #0x10]\n"
- "str q22, [c_ptr1, #0x20]\n"
- "str q23, [c_ptr1, #0x30]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [ldb] "r" (ldbb)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "cc", "memory"
- );
- break;
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "ldr q16, [%[biasptr]]\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "ldr q17, [%[biasptr], #0x10]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov v20.16b, v16.16b\n"
- "ldr q18, [%[biasptr], #0x20]\n"
- "mov v24.16b, v16.16b\n"
- "ldr q19, [%[biasptr], #0x30]\n"
- "mov v21.16b, v17.16b\n"
- "ldr q0, [%[a_ptr0]]\n"
- "mov v25.16b, v17.16b\n"
- "ldr q1, [a_ptr1]\n"
- "mov v22.16b, v18.16b\n"
- "ldr q2, [a_ptr2]\n"
- "mov v23.16b, v19.16b\n"
- "ldr q8, [%[b_ptr0]]\n"
- "mov v26.16b, v18.16b\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "mov v27.16b, v19.16b\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "ldr q6, [a_ptr2]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "ldr q0, [%[a_ptr0], #-0x10]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "ldr q1, [a_ptr1, #-0x10]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "ldr q2, [a_ptr2, #-0x10]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v24.4s, v8.4s, v6.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "fmla v26.4s, v10.4s, v6.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "fmla v27.4s, v11.4s, v6.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "fmla v24.4s, v12.4s, v6.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "fmla v25.4s, v13.4s, v6.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "fmla v26.4s, v14.4s, v6.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "fmla v27.4s, v15.4s, v6.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "fmla v24.4s, v8.4s, v6.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "fmla v25.4s, v9.4s, v6.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "fmla v26.4s, v10.4s, v6.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "fmla v27.4s, v11.4s, v6.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "fmla v24.4s, v12.4s, v6.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "fmla v25.4s, v13.4s, v6.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "fmla v26.4s, v14.4s, v6.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "fmla v27.4s, v15.4s, v6.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "prfm PSTL1KEEP, [c_ptr1]\n"
- "prfm PSTL1KEEP, [c_ptr2]\n"
- "cbz %[regs], 3f\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "ldr q6, [a_ptr2]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "fmla v24.4s, v8.4s, v6.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "fmla v26.4s, v10.4s, v6.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "fmla v27.4s, v11.4s, v6.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "fmla v24.4s, v12.4s, v6.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "fmla v25.4s, v13.4s, v6.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "fmla v26.4s, v14.4s, v6.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "fmla v27.4s, v15.4s, v6.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "fmla v24.4s, v8.4s, v6.s[2]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "fmla v25.4s, v9.4s, v6.s[2]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "fmla v26.4s, v10.4s, v6.s[2]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "fmla v27.4s, v11.4s, v6.s[2]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "fmla v24.4s, v12.4s, v6.s[3]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "fmla v25.4s, v13.4s, v6.s[3]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "fmla v26.4s, v14.4s, v6.s[3]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "fmla v27.4s, v15.4s, v6.s[3]\n"
- "b 4f\n"
- "3:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr s0, [%[a_ptr0]]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x4\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr s1, [a_ptr1]\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add a_ptr1, a_ptr1, #0x4\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr s2, [a_ptr2]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add a_ptr2, a_ptr2, #0x4\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "ld1r {v14.4s}, [%[minptr]]\n"
- "ld1r {v15.4s}, [%[maxptr]]\n"
- "fmax v16.4s, v16.4s, v14.4s\n"
- "fmax v17.4s, v17.4s, v14.4s\n"
- "fmax v18.4s, v18.4s, v14.4s\n"
- "fmax v19.4s, v19.4s, v14.4s\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
- "fmin v17.4s, v17.4s, v15.4s\n"
- "fmin v18.4s, v18.4s, v15.4s\n"
- "fmin v19.4s, v19.4s, v15.4s\n"
- "str q16, [%[c_ptr0]]\n"
- "fmax v20.4s, v20.4s, v14.4s\n"
- "fmax v21.4s, v21.4s, v14.4s\n"
- "fmax v22.4s, v22.4s, v14.4s\n"
- "str q17, [%[c_ptr0], #0x10]\n"
- "fmax v23.4s, v23.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v15.4s\n"
- "fmin v21.4s, v21.4s, v15.4s\n"
- "str q18, [%[c_ptr0], #0x20]\n"
- "fmin v22.4s, v22.4s, v15.4s\n"
- "fmin v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v14.4s\n"
- "str q19, [%[c_ptr0], #0x30]\n"
- "fmax v25.4s, v25.4s, v14.4s\n"
- "add %[c_ptr0], %[c_ptr0], #0x40\n"
- "fmax v26.4s, v26.4s, v14.4s\n"
- "str q20, [c_ptr1]\n"
- "fmin v24.4s, v24.4s, v15.4s\n"
- "fmin v25.4s, v25.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v14.4s\n"
- "str q21, [c_ptr1, #0x10]\n"
- "fmin v26.4s, v26.4s, v15.4s\n"
- "fmin v27.4s, v27.4s, v15.4s\n"
- "str q22, [c_ptr1, #0x20]\n"
- "str q23, [c_ptr1, #0x30]\n"
- "str q24, [c_ptr2]\n"
- "str q25, [c_ptr2, #0x10]\n"
- "str q26, [c_ptr2, #0x20]\n"
- "str q27, [c_ptr2, #0x30]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [ldb] "r" (ldbb)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "ldr q16, [%[biasptr]]\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "ldr q17, [%[biasptr], #0x10]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov v20.16b, v16.16b\n"
- "ldr q18, [%[biasptr], #0x20]\n"
- "mov v24.16b, v16.16b\n"
- "ldr q19, [%[biasptr], #0x30]\n"
- "mov v21.16b, v17.16b\n"
- "ldr q0, [%[a_ptr0]]\n"
- "mov v25.16b, v17.16b\n"
- "ldr q1, [a_ptr1]\n"
- "mov v22.16b, v18.16b\n"
- "ldr q2, [a_ptr2]\n"
- "mov v23.16b, v19.16b\n"
- "ldr q8, [%[b_ptr0]]\n"
- "mov v26.16b, v18.16b\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "mov v27.16b, v19.16b\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "mov v28.16b, v16.16b\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "mov v29.16b, v17.16b\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "mov v30.16b, v18.16b\n"
- "ldr q3, [a_ptr3]\n"
- "mov v31.16b, v19.16b\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v28.4s, v8.4s, v3.s[0]\n"
- "ldr q6, [a_ptr2]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "ldr q7, [a_ptr3]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v29.4s, v9.4s, v3.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "prfm PLDL1KEEP, [%[a_ptr0], #0x40]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla v30.4s, v10.4s, v3.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- "fmla v31.4s, v11.4s, v3.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "prfm PLDL1KEEP, [a_ptr1, #0x40]\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "prfm PLDL1KEEP, [a_ptr2, #0x40]\n"
- "fmla v28.4s, v12.4s, v3.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "prfm PLDL1KEEP, [a_ptr3, #0x40]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "fmla v29.4s, v13.4s, v3.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "fmla v30.4s, v14.4s, v3.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "fmla v31.4s, v15.4s, v3.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "fmla v28.4s, v8.4s, v3.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "fmla v29.4s, v9.4s, v3.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "fmla v30.4s, v10.4s, v3.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "fmla v31.4s, v11.4s, v3.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "fmla v28.4s, v12.4s, v3.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "fmla v29.4s, v13.4s, v3.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "fmla v30.4s, v14.4s, v3.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "ldr q0, [%[a_ptr0], #-0x10]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "ldr q1, [a_ptr1, #-0x10]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "ldr q2, [a_ptr2, #-0x10]\n"
- "fmla v31.4s, v15.4s, v3.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "ldr q3, [a_ptr3, #-0x10]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v24.4s, v8.4s, v6.s[0]\n"
- "fmla v28.4s, v8.4s, v7.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[0]\n"
- "fmla v29.4s, v9.4s, v7.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "fmla v26.4s, v10.4s, v6.s[0]\n"
- "fmla v30.4s, v10.4s, v7.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "fmla v27.4s, v11.4s, v6.s[0]\n"
- "fmla v31.4s, v11.4s, v7.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "fmla v24.4s, v12.4s, v6.s[1]\n"
- "fmla v28.4s, v12.4s, v7.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "fmla v25.4s, v13.4s, v6.s[1]\n"
- "fmla v29.4s, v13.4s, v7.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "fmla v26.4s, v14.4s, v6.s[1]\n"
- "fmla v30.4s, v14.4s, v7.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "fmla v27.4s, v15.4s, v6.s[1]\n"
- "fmla v31.4s, v15.4s, v7.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "fmla v24.4s, v8.4s, v6.s[2]\n"
- "fmla v28.4s, v8.4s, v7.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "fmla v25.4s, v9.4s, v6.s[2]\n"
- "fmla v29.4s, v9.4s, v7.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "fmla v26.4s, v10.4s, v6.s[2]\n"
- "fmla v30.4s, v10.4s, v7.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "fmla v27.4s, v11.4s, v6.s[2]\n"
- "fmla v31.4s, v11.4s, v7.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "fmla v24.4s, v12.4s, v6.s[3]\n"
- "fmla v28.4s, v12.4s, v7.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "fmla v25.4s, v13.4s, v6.s[3]\n"
- "fmla v29.4s, v13.4s, v7.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "fmla v26.4s, v14.4s, v6.s[3]\n"
- "fmla v30.4s, v14.4s, v7.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "fmla v27.4s, v15.4s, v6.s[3]\n"
- "fmla v31.4s, v15.4s, v7.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "prfm PSTL1KEEP, [%[c_ptr0]]\n"
- "prfm PSTL1KEEP, [c_ptr1]\n"
- "prfm PSTL1KEEP, [c_ptr2]\n"
- "prfm PSTL1KEEP, [c_ptr3]\n"
- "cbz %[regs], 3f\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q4, [%[a_ptr0]]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr q5, [a_ptr1]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "ldr q6, [a_ptr2]\n"
- "fmla v28.4s, v8.4s, v3.s[0]\n"
- "ldr q7, [a_ptr3]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "fmla v29.4s, v9.4s, v3.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "fmla v30.4s, v10.4s, v3.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "fmla v31.4s, v11.4s, v3.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "fmla v28.4s, v12.4s, v3.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "fmla v29.4s, v13.4s, v3.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "fmla v30.4s, v14.4s, v3.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "fmla v31.4s, v15.4s, v3.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "fmla v28.4s, v8.4s, v3.s[2]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "fmla v29.4s, v9.4s, v3.s[2]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "fmla v30.4s, v10.4s, v3.s[2]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "fmla v31.4s, v11.4s, v3.s[2]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "fmla v28.4s, v12.4s, v3.s[3]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "fmla v29.4s, v13.4s, v3.s[3]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "fmla v30.4s, v14.4s, v3.s[3]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "fmla v31.4s, v15.4s, v3.s[3]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v5.s[0]\n"
- "fmla v24.4s, v8.4s, v6.s[0]\n"
- "fmla v28.4s, v8.4s, v7.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v4.s[0]\n"
- "fmla v21.4s, v9.4s, v5.s[0]\n"
- "fmla v25.4s, v9.4s, v6.s[0]\n"
- "fmla v29.4s, v9.4s, v7.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v4.s[0]\n"
- "fmla v22.4s, v10.4s, v5.s[0]\n"
- "fmla v26.4s, v10.4s, v6.s[0]\n"
- "fmla v30.4s, v10.4s, v7.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v4.s[0]\n"
- "fmla v23.4s, v11.4s, v5.s[0]\n"
- "fmla v27.4s, v11.4s, v6.s[0]\n"
- "fmla v31.4s, v11.4s, v7.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v5.s[1]\n"
- "fmla v24.4s, v12.4s, v6.s[1]\n"
- "fmla v28.4s, v12.4s, v7.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v4.s[1]\n"
- "fmla v21.4s, v13.4s, v5.s[1]\n"
- "fmla v25.4s, v13.4s, v6.s[1]\n"
- "fmla v29.4s, v13.4s, v7.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v4.s[1]\n"
- "fmla v22.4s, v14.4s, v5.s[1]\n"
- "fmla v26.4s, v14.4s, v6.s[1]\n"
- "fmla v30.4s, v14.4s, v7.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v4.s[1]\n"
- "fmla v23.4s, v15.4s, v5.s[1]\n"
- "fmla v27.4s, v15.4s, v6.s[1]\n"
- "fmla v31.4s, v15.4s, v7.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v4.s[2]\n"
- "fmla v20.4s, v8.4s, v5.s[2]\n"
- "fmla v24.4s, v8.4s, v6.s[2]\n"
- "fmla v28.4s, v8.4s, v7.s[2]\n"
- "fmla v17.4s, v9.4s, v4.s[2]\n"
- "fmla v21.4s, v9.4s, v5.s[2]\n"
- "fmla v25.4s, v9.4s, v6.s[2]\n"
- "fmla v29.4s, v9.4s, v7.s[2]\n"
- "fmla v18.4s, v10.4s, v4.s[2]\n"
- "fmla v22.4s, v10.4s, v5.s[2]\n"
- "fmla v26.4s, v10.4s, v6.s[2]\n"
- "fmla v30.4s, v10.4s, v7.s[2]\n"
- "fmla v19.4s, v11.4s, v4.s[2]\n"
- "fmla v23.4s, v11.4s, v5.s[2]\n"
- "fmla v27.4s, v11.4s, v6.s[2]\n"
- "fmla v31.4s, v11.4s, v7.s[2]\n"
- "fmla v16.4s, v12.4s, v4.s[3]\n"
- "fmla v20.4s, v12.4s, v5.s[3]\n"
- "fmla v24.4s, v12.4s, v6.s[3]\n"
- "fmla v28.4s, v12.4s, v7.s[3]\n"
- "fmla v17.4s, v13.4s, v4.s[3]\n"
- "fmla v21.4s, v13.4s, v5.s[3]\n"
- "fmla v25.4s, v13.4s, v6.s[3]\n"
- "fmla v29.4s, v13.4s, v7.s[3]\n"
- "fmla v18.4s, v14.4s, v4.s[3]\n"
- "fmla v22.4s, v14.4s, v5.s[3]\n"
- "fmla v26.4s, v14.4s, v6.s[3]\n"
- "fmla v30.4s, v14.4s, v7.s[3]\n"
- "fmla v19.4s, v15.4s, v4.s[3]\n"
- "fmla v23.4s, v15.4s, v5.s[3]\n"
- "fmla v27.4s, v15.4s, v6.s[3]\n"
- "fmla v31.4s, v15.4s, v7.s[3]\n"
- "b 4f\n"
- "3:\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "fmla v28.4s, v8.4s, v3.s[0]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "fmla v29.4s, v9.4s, v3.s[0]\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "fmla v30.4s, v10.4s, v3.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "fmla v31.4s, v11.4s, v3.s[0]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v12.4s, v0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla v20.4s, v12.4s, v1.s[1]\n"
- "fmla v24.4s, v12.4s, v2.s[1]\n"
- "fmla v28.4s, v12.4s, v3.s[1]\n"
- "ldr q12, [%[b_ptr0]]\n"
- "fmla v17.4s, v13.4s, v0.s[1]\n"
- "fmla v21.4s, v13.4s, v1.s[1]\n"
- "fmla v25.4s, v13.4s, v2.s[1]\n"
- "fmla v29.4s, v13.4s, v3.s[1]\n"
- "ldr q13, [%[b_ptr0], #0x10]\n"
- "fmla v18.4s, v14.4s, v0.s[1]\n"
- "fmla v22.4s, v14.4s, v1.s[1]\n"
- "fmla v26.4s, v14.4s, v2.s[1]\n"
- "fmla v30.4s, v14.4s, v3.s[1]\n"
- "ldr q14, [%[b_ptr0], #0x20]\n"
- "fmla v19.4s, v15.4s, v0.s[1]\n"
- "fmla v23.4s, v15.4s, v1.s[1]\n"
- "fmla v27.4s, v15.4s, v2.s[1]\n"
- "fmla v31.4s, v15.4s, v3.s[1]\n"
- "ldr q15, [%[b_ptr0], #0x30]\n"
- "fmla v16.4s, v8.4s, v0.s[2]\n"
- "fmla v20.4s, v8.4s, v1.s[2]\n"
- "fmla v24.4s, v8.4s, v2.s[2]\n"
- "fmla v28.4s, v8.4s, v3.s[2]\n"
- "fmla v17.4s, v9.4s, v0.s[2]\n"
- "fmla v21.4s, v9.4s, v1.s[2]\n"
- "fmla v25.4s, v9.4s, v2.s[2]\n"
- "fmla v29.4s, v9.4s, v3.s[2]\n"
- "fmla v18.4s, v10.4s, v0.s[2]\n"
- "fmla v22.4s, v10.4s, v1.s[2]\n"
- "fmla v26.4s, v10.4s, v2.s[2]\n"
- "fmla v30.4s, v10.4s, v3.s[2]\n"
- "fmla v19.4s, v11.4s, v0.s[2]\n"
- "fmla v23.4s, v11.4s, v1.s[2]\n"
- "fmla v27.4s, v11.4s, v2.s[2]\n"
- "fmla v31.4s, v11.4s, v3.s[2]\n"
- "fmla v16.4s, v12.4s, v0.s[3]\n"
- "fmla v20.4s, v12.4s, v1.s[3]\n"
- "fmla v24.4s, v12.4s, v2.s[3]\n"
- "fmla v28.4s, v12.4s, v3.s[3]\n"
- "fmla v17.4s, v13.4s, v0.s[3]\n"
- "fmla v21.4s, v13.4s, v1.s[3]\n"
- "fmla v25.4s, v13.4s, v2.s[3]\n"
- "fmla v29.4s, v13.4s, v3.s[3]\n"
- "fmla v18.4s, v14.4s, v0.s[3]\n"
- "fmla v22.4s, v14.4s, v1.s[3]\n"
- "fmla v26.4s, v14.4s, v2.s[3]\n"
- "fmla v30.4s, v14.4s, v3.s[3]\n"
- "fmla v19.4s, v15.4s, v0.s[3]\n"
- "fmla v23.4s, v15.4s, v1.s[3]\n"
- "fmla v27.4s, v15.4s, v2.s[3]\n"
- "fmla v31.4s, v15.4s, v3.s[3]\n"
- "4:\n"
- "cbz %[blocks], 5f\n"
- "6:\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ldr s0, [%[a_ptr0]]\n"
- "ldr q8, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x4\n"
- "ldr q9, [%[b_ptr0], #0x10]\n"
- "ldr s1, [a_ptr1]\n"
- "fmla v16.4s, v8.4s, v0.s[0]\n"
- "ldr q10, [%[b_ptr0], #0x20]\n"
- "ldr q11, [%[b_ptr0], #0x30]\n"
- "add a_ptr1, a_ptr1, #0x4\n"
- "fmla v20.4s, v8.4s, v1.s[0]\n"
- "ldr s2, [a_ptr2]\n"
- "fmla v17.4s, v9.4s, v0.s[0]\n"
- "add a_ptr2, a_ptr2, #0x4\n"
- "fmla v21.4s, v9.4s, v1.s[0]\n"
- "ldr s3, [a_ptr3]\n"
- "fmla v24.4s, v8.4s, v2.s[0]\n"
- "add a_ptr3, a_ptr3, #0x4\n"
- "fmla v25.4s, v9.4s, v2.s[0]\n"
- "fmla v28.4s, v8.4s, v3.s[0]\n"
- "fmla v29.4s, v9.4s, v3.s[0]\n"
- "fmla v18.4s, v10.4s, v0.s[0]\n"
- "fmla v22.4s, v10.4s, v1.s[0]\n"
- "fmla v26.4s, v10.4s, v2.s[0]\n"
- "fmla v30.4s, v10.4s, v3.s[0]\n"
- "fmla v19.4s, v11.4s, v0.s[0]\n"
- "fmla v23.4s, v11.4s, v1.s[0]\n"
- "fmla v27.4s, v11.4s, v2.s[0]\n"
- "fmla v31.4s, v11.4s, v3.s[0]\n"
- "b.ne 6b\n"
- "5:\n"
- "ld1r {v14.4s}, [%[minptr]]\n"
- "ld1r {v15.4s}, [%[maxptr]]\n"
- "fmax v16.4s, v16.4s, v14.4s\n"
- "fmax v17.4s, v17.4s, v14.4s\n"
- "fmax v18.4s, v18.4s, v14.4s\n"
- "fmax v19.4s, v19.4s, v14.4s\n"
- "fmin v16.4s, v16.4s, v15.4s\n"
- "fmin v17.4s, v17.4s, v15.4s\n"
- "fmin v18.4s, v18.4s, v15.4s\n"
- "fmin v19.4s, v19.4s, v15.4s\n"
- "str q16, [%[c_ptr0]]\n"
- "fmax v20.4s, v20.4s, v14.4s\n"
- "fmax v21.4s, v21.4s, v14.4s\n"
- "fmax v22.4s, v22.4s, v14.4s\n"
- "str q17, [%[c_ptr0], #0x10]\n"
- "fmax v23.4s, v23.4s, v14.4s\n"
- "fmin v20.4s, v20.4s, v15.4s\n"
- "fmin v21.4s, v21.4s, v15.4s\n"
- "str q18, [%[c_ptr0], #0x20]\n"
- "fmin v22.4s, v22.4s, v15.4s\n"
- "fmin v23.4s, v23.4s, v15.4s\n"
- "fmax v24.4s, v24.4s, v14.4s\n"
- "str q19, [%[c_ptr0], #0x30]\n"
- "fmax v25.4s, v25.4s, v14.4s\n"
- "add %[c_ptr0], %[c_ptr0], #0x40\n"
- "fmax v26.4s, v26.4s, v14.4s\n"
- "str q20, [c_ptr1]\n"
- "fmin v24.4s, v24.4s, v15.4s\n"
- "fmin v25.4s, v25.4s, v15.4s\n"
- "fmax v27.4s, v27.4s, v14.4s\n"
- "str q21, [c_ptr1, #0x10]\n"
- "fmin v26.4s, v26.4s, v15.4s\n"
- "fmax v28.4s, v28.4s, v14.4s\n"
- "fmax v29.4s, v29.4s, v14.4s\n"
- "str q22, [c_ptr1, #0x20]\n"
- "fmin v27.4s, v27.4s, v15.4s\n"
- "fmax v30.4s, v30.4s, v14.4s\n"
- "fmin v28.4s, v28.4s, v15.4s\n"
- "str q23, [c_ptr1, #0x30]\n"
- "fmin v29.4s, v29.4s, v15.4s\n"
- "fmax v31.4s, v31.4s, v14.4s\n"
- "fmin v30.4s, v30.4s, v15.4s\n"
- "str q24, [c_ptr2]\n"
- "fmin v31.4s, v31.4s, v15.4s\n"
- "str q25, [c_ptr2, #0x10]\n"
- "str q26, [c_ptr2, #0x20]\n"
- "str q27, [c_ptr2, #0x30]\n"
- "str q28, [c_ptr3]\n"
- "str q29, [c_ptr3, #0x10]\n"
- "str q30, [c_ptr3, #0x20]\n"
- "str q31, [c_ptr3, #0x30]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [ldb] "r" (ldbb)
- : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __aarch64__
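
The deleted kernel above (an FP32 MLA kernel producing a 16-column by up-to-4-row tile, with the B panel read in place at stride ldb) follows the same shape in every row-count case: accumulators are initialised from the bias, one fmla is issued per (row, column, k) element, and the results are clamped against the broadcast min/max values before being stored to the C rows. For orientation, a minimal plain-C++ sketch of that structure is given here; the function name, the row-major layout, and the flattening of the kernel's loops/regs/blocks K-tail handling into a single loop are illustrative assumptions, not the removed kernel's interface.

    #include <algorithm>
    #include <cstddef>

    // Sketch of the per-tile computation the removed assembly performs:
    // bias-initialised accumulators, FMLA over K against a strided B panel,
    // then a min/max clamp before the stores. Names and layout are assumed.
    static void fp32_mla_tile_sketch(const float *A, std::size_t lda,
                                     const float *B, std::size_t ldb,
                                     float *C, std::size_t ldc,
                                     const float *bias,
                                     float minval, float maxval,
                                     unsigned rows /* <= 4 */, unsigned K,
                                     unsigned cols /* <= 16 */) {
        float acc[4][16];

        // Accumulators start from the bias (the q16..q19 loads, copied per row).
        for (unsigned r = 0; r < rows; r++) {
            for (unsigned c = 0; c < cols; c++) {
                acc[r][c] = bias[c];
            }
        }

        // One multiply-accumulate per (row, column, k), with B advancing by
        // ldb for each k step, as the b_ptr0 updates do above.
        for (unsigned k = 0; k < K; k++) {
            for (unsigned r = 0; r < rows; r++) {
                const float a = A[r * lda + k];
                for (unsigned c = 0; c < cols; c++) {
                    acc[r][c] += a * B[k * ldb + c];
                }
            }
        }

        // Clamp to [minval, maxval] (the fmax/fmin against v14/v15) and store.
        for (unsigned r = 0; r < rows; r++) {
            for (unsigned c = 0; c < cols; c++) {
                C[r * ldc + c] = std::min(std::max(acc[r][c], minval), maxval);
            }
        }
    }
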
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans.hpp
deleted file mode 100644
index 7592798b0d..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans.hpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2017,2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __aarch64__
-
-namespace arm_gemm {
-
-// Actual kernel implementations
-void a64_sgemv_trans(const float *, const float *, float *, float, int, int, int);
-
-// Transposed SGEMV strategy class.
-class sgemv_trans {
-public:
- typedef float operand_type;
- typedef float result_type;
-
- typedef void (*kern_type)(const float *, const float *, float *, float, int, int, int);
-
- /* Kernel blocking parameters */
- static unsigned int out_width() {
- return 96;
- }
-
- static unsigned int k_unroll() {
- return 1;
- }
-
- kern_type kernel=a64_sgemv_trans;
-
- sgemv_trans(const CPUInfo *) { }
-};
-
-} // namespace arm_gemm
-
-#endif // __aarch64__
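
As the deleted strategy class shows, sgemv_trans processed 96 output columns per outer step (out_width()) with no K unrolling, and its kernel takes (A, x, y, beta, lda, M, N). The operation is y = beta * y + A^T * x over an M x N panel of A with row stride lda; a plain scalar reference is sketched here for orientation, with the beta == 0 case skipping the read of y to match the kernel's beta0 path. This is a sketch under those assumptions, not the removed implementation, whose assembly follows in the next file.

    // Scalar reference for the transposed SGEMV the removed kernel computed.
    // Column n of A (stride lda between rows) is dotted with x; beta == 0
    // means y is written without being read first.
    static void sgemv_trans_ref(const float *A, const float *x, float *y,
                                float beta, int lda, int M, int N) {
        for (int n = 0; n < N; n++) {
            float acc = 0.0f;
            for (int m = 0; m < M; m++) {
                acc += A[m * lda + n] * x[m];
            }
            y[n] = (beta == 0.0f) ? acc : beta * y[n] + acc;
        }
    }
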
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans/generic.cpp
deleted file mode 100644
index cb7f239039..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemv_trans/generic.cpp
+++ /dev/null
@@ -1,1072 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __aarch64__
-
-#include <cstddef>
-
-#include <arm_neon.h>
-
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-// Kernel implementation - transposed GEMV
-//
-// The kernel will process "M" rows of A (= steps of dot product) and "N"
-// columns (= dot products total)
-//
-// General plan is to do as many columns simultaneously as possible - a
-// reasonable limit is half the NEON regfile = 64 total accumulators.
-//
-// It's possible that messing around with sub-blocking M and N can yield
-// higher performance, but that's left to the outer loop. In this kernel we
-// process all of M at the same time.
-
-
-// How far ahead to prefetch for the first and subsequent prefetches.
-// These values work for A72 on JunoR2...
-
-#define FIRST_PFD 9
-#define PFD 6
-
-namespace arm_gemm {
-
-void a64_sgemv_trans(const float *Astart, const float *Xstart, float *Ystart, float beta, int lda, int M, int N) {
- const float *a_ptr_base = Astart;
- float *y_ptr = Ystart;
- const bool beta0 = (beta == 0.0f);
-
- register const float32x4_t vb asm("v1") = vdupq_n_f32(beta);
-
- int firstpfd=FIRST_PFD;
- if (firstpfd > M) {
- firstpfd = (M-1);
- }
-
- int pfd = PFD;
- if (pfd > M) {
- pfd = (M-1);
- }
-
- ptrdiff_t jump = lda * sizeof(int);
-
- for (;N>=96;N-=96) {
- int k = M-1;
-
- const float *a_ptr = a_ptr_base;
- const float *x_ptr = Xstart;
- const float *pf_ptr = a_ptr;
- const float *firstpf_ptr = a_ptr;
- const float *pf_limit = a_ptr + (M * lda);
-
- for (int i=0; i<firstpfd; i++) {
- prefetch_1x(firstpf_ptr);
- firstpf_ptr += lda;
- }
-
- for (int i=0; i<pfd; i++) {
- prefetch_5x(pf_ptr + 16);
- pf_ptr += lda;
- }
-
- a_ptr_base += 96;
-
- __asm __volatile (
- "movi v8.4s,#0x0\n"
- "ldr w0, [%[x_ptr]]\n"
- "movi v9.4s,#0x0\n"
- "ldr q2, [%[a_ptr], #0]\n"
- "movi v10.4s,#0x0\n"
- "ldr q3, [%[a_ptr], #0x10]\n"
- "movi v11.4s,#0x0\n"
- "ldr q4, [%[a_ptr], #0x20]\n"
- "movi v12.4s,#0x0\n"
- "ldr q5, [%[a_ptr], #0x30]\n"
- "movi v13.4s,#0x0\n"
- "ldr q6, [%[a_ptr], #0x40]\n"
- "movi v14.4s,#0x0\n"
- "ldr q7, [%[a_ptr], #0x50]\n"
- "movi v15.4s,#0x0\n"
- ASM_PREFETCH("[%[firstpf_ptr]]")
- "movi v16.4s, #0x0\n"
- "movi v17.4s, #0x0\n"
- ASM_PREFETCH("[%[pf_ptr], #64]")
- "movi v18.4s, #0x0\n"
- "movi v19.4s, #0x0\n"
- ASM_PREFETCH("[%[pf_ptr], #128]")
- "movi v20.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- ASM_PREFETCH("[%[pf_ptr], #192]")
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- ASM_PREFETCH("[%[pf_ptr], #256]")
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- ASM_PREFETCH("[%[pf_ptr], #320]")
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "add %[pf_ptr], %[pf_ptr], %[jump]\n"
- "movi v28.4s, #0x0\n"
- "add %[firstpf_ptr], %[firstpf_ptr], %[jump]\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v31.4s, #0x0\n"
-
- // Skip everything if there are no iterations of the main loop to do.
- "cbz %w[k], 10f\n"
-
- // Loop with all prefetches. Exit this loop when firstpf_ptr
- // hits pf_limit.
- "1:\n"
- "dup v0.4s, w0\n"
- "ldr w0, [%[x_ptr], #4]\n"
- "add %[x_ptr], %[x_ptr], #0x4\n"
- "fmla v8.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x60]\n"
- "fmla v9.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x70]\n"
- ASM_PREFETCH("[%[firstpf_ptr]]")
- "fmla v10.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x80]\n"
- "add %[firstpf_ptr], %[firstpf_ptr], %[jump]\n"
- "fmla v11.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x90]\n"
- "sub %w[k], %w[k], #1\n"
- ASM_PREFETCH("[%[x_ptr], #128]")
- "fmla v12.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0xa0]\n"
- "fmla v13.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0xb0]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x40]")
- "fmla v14.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0xc0]\n"
- "fmla v15.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0xd0]\n"
- "fmla v16.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0xe0]\n"
- "fmla v17.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0xf0]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x80]")
- "fmla v18.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x100]\n"
- "fmla v19.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x110]\n"
- "fmla v20.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x120]\n"
- "fmla v21.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x130]\n"
- ASM_PREFETCH("[%[pf_ptr], #0xc0]")
- "fmla v22.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x140]\n"
- "fmla v23.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x150]\n"
- "fmla v24.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x160]\n"
- "fmla v25.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x170]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x100]")
- "add %[a_ptr], %[a_ptr], %[jump]\n"
- "fmla v26.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x00]\n"
- "fmla v27.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x10]\n"
- "fmla v28.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x20]\n"
- "fmla v29.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x30]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x140]")
- "fmla v30.4s, v6.4s, v0.4s\n"
- "add %[pf_ptr], %[pf_ptr], %[jump]\n"
- "ldr q6, [%[a_ptr], #0x40]\n"
- "fmla v31.4s, v7.4s, v0.4s\n"
- "cmp %[firstpf_ptr], %[pf_limit]\n"
- "ldr q7, [%[a_ptr], #0x50]\n"
- "blt 1b\n"
-
- // Check that there are still "main" prefetches to do.
- "cmp %[pf_ptr], %[pf_limit]\n"
- "bge 9f\n"
-
- // Just the main prefetches, exit this loop when pf_ptr hits pf_limit.
- "8:\n"
- "dup v0.4s, w0\n"
- "ldr w0, [%[x_ptr], #4]\n"
- "add %[x_ptr], %[x_ptr], #0x4\n"
- "fmla v8.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x60]\n"
- "fmla v9.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x70]\n"
- "fmla v10.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x80]\n"
- "fmla v11.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x90]\n"
- "sub %w[k], %w[k], #1\n"
- ASM_PREFETCH("[%[x_ptr], #128]")
- "fmla v12.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0xa0]\n"
- "fmla v13.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0xb0]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x40]")
- "fmla v14.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0xc0]\n"
- "fmla v15.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0xd0]\n"
- "fmla v16.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0xe0]\n"
- "fmla v17.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0xf0]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x80]")
- "fmla v18.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x100]\n"
- "fmla v19.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x110]\n"
- "fmla v20.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x120]\n"
- "fmla v21.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x130]\n"
- ASM_PREFETCH("[%[pf_ptr], #0xc0]")
- "fmla v22.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x140]\n"
- "fmla v23.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x150]\n"
- "fmla v24.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x160]\n"
- "fmla v25.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x170]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x100]")
- "add %[a_ptr], %[a_ptr], %[jump]\n"
- "fmla v26.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x00]\n"
- "fmla v27.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x10]\n"
- "fmla v28.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x20]\n"
- "fmla v29.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x30]\n"
- ASM_PREFETCH("[%[pf_ptr], #0x140]")
- "fmla v30.4s, v6.4s, v0.4s\n"
- "add %[pf_ptr], %[pf_ptr], %[jump]\n"
- "ldr q6, [%[a_ptr], #0x40]\n"
- "fmla v31.4s, v7.4s, v0.4s\n"
- "cmp %[pf_ptr], %[pf_limit]\n"
- "ldr q7, [%[a_ptr], #0x50]\n"
- "blt 8b\n"
-
- // Check that there is still work to do.
- "9:\n"
- "cmp %w[k], #0\n"
- "beq 10f\n"
-
- // Loop without prefetches, exit when k hits 0.
- "2:\n"
- "dup v0.4s, w0\n"
- "ldr w0, [%[x_ptr], #4]\n"
- "add %[x_ptr], %[x_ptr], #0x4\n"
- "fmla v8.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x60]\n"
- "fmla v9.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x70]\n"
- "fmla v10.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x80]\n"
- "fmla v11.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x90]\n"
- "subs %w[k], %w[k], #1\n"
- "fmla v12.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0xa0]\n"
- "fmla v13.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0xb0]\n"
- "fmla v14.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0xc0]\n"
- "fmla v15.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0xd0]\n"
- "fmla v16.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0xe0]\n"
- "fmla v17.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0xf0]\n"
- "fmla v18.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x100]\n"
- "fmla v19.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x110]\n"
- "fmla v20.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x120]\n"
- "fmla v21.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x130]\n"
- "fmla v22.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x140]\n"
- "fmla v23.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x150]\n"
- "fmla v24.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x160]\n"
- "fmla v25.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x170]\n"
- "add %[a_ptr], %[a_ptr], %[jump]\n"
- "fmla v26.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x00]\n"
- "fmla v27.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x10]\n"
- "fmla v28.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x20]\n"
- "fmla v29.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x30]\n"
- "fmla v30.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x40]\n"
- "fmla v31.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x50]\n"
- "bne 2b\n"
-
- "10:\n"
-
- // Final iteration
- "dup v0.4s, w0\n"
- "fmla v8.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x60]\n"
- "fmla v9.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x70]\n"
- "fmla v10.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x80]\n"
- "fmla v11.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x90]\n"
- "fmla v12.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0xa0]\n"
- "fmla v13.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0xb0]\n"
- "fmla v14.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0xc0]\n"
- "fmla v15.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0xd0]\n"
- "fmla v16.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0xe0]\n"
- "fmla v17.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0xf0]\n"
- "fmla v18.4s, v6.4s, v0.4s\n"
-
- "ldr q6, [%[a_ptr], #0x100]\n"
- "fmla v19.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x110]\n"
- "fmla v20.4s, v2.4s, v0.4s\n"
- "ldr q2, [%[a_ptr], #0x120]\n"
- "fmla v21.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[a_ptr], #0x130]\n"
- "fmla v22.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[a_ptr], #0x140]\n"
- "fmla v23.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[a_ptr], #0x150]\n"
- "fmla v24.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[a_ptr], #0x160]\n"
- "fmla v25.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[a_ptr], #0x170]\n"
- "fmla v26.4s, v2.4s, v0.4s\n"
- "cbnz %w[beta0], 11f\n"
- "ldr q2, [%[y_ptr]]\n"
- "fmla v27.4s, v3.4s, v0.4s\n"
- "ldr q3, [%[y_ptr], #0x10]\n"
- "fmla v28.4s, v4.4s, v0.4s\n"
- "ldr q4, [%[y_ptr], #0x20]\n"
- "fmla v29.4s, v5.4s, v0.4s\n"
- "ldr q5, [%[y_ptr], #0x30]\n"
- "fmla v30.4s, v6.4s, v0.4s\n"
- "ldr q6, [%[y_ptr], #0x40]\n"
- "fmla v31.4s, v7.4s, v0.4s\n"
- "ldr q7, [%[y_ptr], #0x50]\n"
-
- "fmla v8.4s, v2.4s, %[vb].4s\n"
- "ldr q2, [%[y_ptr], #0x60]\n"
- "fmla v9.4s, v3.4s, %[vb].4s\n"
- "ldr q3, [%[y_ptr], #0x70]\n"
- "fmla v10.4s, v4.4s, %[vb].4s\n"
- "ldr q4, [%[y_ptr], #0x80]\n"
- "fmla v11.4s, v5.4s, %[vb].4s\n"
- "ldr q5, [%[y_ptr], #0x90]\n"
- "fmla v12.4s, v6.4s, %[vb].4s\n"
- "ldr q6, [%[y_ptr], #0xa0]\n"
- "str q8, [%[y_ptr], #0x00]\n"
- "fmla v13.4s, v7.4s, %[vb].4s\n"
- "ldr q7, [%[y_ptr], #0xb0]\n"
- "str q9, [%[y_ptr], #0x10]\n"
- "fmla v14.4s, v2.4s, %[vb].4s\n"
- "ldr q2, [%[y_ptr], #0xc0]\n"
- "str q10, [%[y_ptr], #0x20]\n"
- "fmla v15.4s, v3.4s, %[vb].4s\n"
- "ldr q3, [%[y_ptr], #0xd0]\n"
- "str q11, [%[y_ptr], #0x30]\n"
- "fmla v16.4s, v4.4s, %[vb].4s\n"
- "ldr q4, [%[y_ptr], #0xe0]\n"
- "str q12, [%[y_ptr], #0x40]\n"
- "fmla v17.4s, v5.4s, %[vb].4s\n"
- "ldr q5, [%[y_ptr], #0xf0]\n"
- "str q13, [%[y_ptr], #0x50]\n"
- "fmla v18.4s, v6.4s, %[vb].4s\n"
- "ldr q6, [%[y_ptr], #0x100]\n"
- "str q14, [%[y_ptr], #0x60]\n"
- "fmla v19.4s, v7.4s, %[vb].4s\n"
- "ldr q7, [%[y_ptr], #0x110]\n"
- "str q15, [%[y_ptr], #0x70]\n"
- "fmla v20.4s, v2.4s, %[vb].4s\n"
- "ldr q2, [%[y_ptr], #0x120]\n"
- "str q16, [%[y_ptr], #0x80]\n"
- "fmla v21.4s, v3.4s, %[vb].4s\n"
- "ldr q3, [%[y_ptr], #0x130]\n"
- "str q17, [%[y_ptr], #0x90]\n"
- "fmla v22.4s, v4.4s, %[vb].4s\n"
- "ldr q4, [%[y_ptr], #0x140]\n"
- "str q18, [%[y_ptr], #0xa0]\n"
- "fmla v23.4s, v5.4s, %[vb].4s\n"
- "ldr q5, [%[y_ptr], #0x150]\n"
- "str q19, [%[y_ptr], #0xb0]\n"
- "fmla v24.4s, v6.4s, %[vb].4s\n"
- "ldr q6, [%[y_ptr], #0x160]\n"
- "str q20, [%[y_ptr], #0xc0]\n"
- "fmla v25.4s, v7.4s, %[vb].4s\n"
- "ldr q7, [%[y_ptr], #0x170]\n"
- "str q21, [%[y_ptr], #0xd0]\n"
- "fmla v26.4s, v2.4s, %[vb].4s\n"
- "str q22, [%[y_ptr], #0xe0]\n"
- "fmla v27.4s, v3.4s, %[vb].4s\n"
- "str q23, [%[y_ptr], #0xf0]\n"
- "fmla v28.4s, v4.4s, %[vb].4s\n"
- "str q24, [%[y_ptr], #0x100]\n"
- "fmla v29.4s, v5.4s, %[vb].4s\n"
- "str q25, [%[y_ptr], #0x110]\n"
- "fmla v30.4s, v6.4s, %[vb].4s\n"
- "str q26, [%[y_ptr], #0x120]\n"
- "fmla v31.4s, v7.4s, %[vb].4s\n"
- "str q27, [%[y_ptr], #0x130]\n"
- "b 12f\n"
-
- // beta 0 code - don't read.
- "11:\n"
- "str q8, [%[y_ptr], #0x00]\n"
- "fmla v27.4s, v3.4s, v0.4s\n"
- "str q9, [%[y_ptr], #0x10]\n"
- "fmla v28.4s, v4.4s, v0.4s\n"
- "str q10, [%[y_ptr], #0x20]\n"
- "fmla v29.4s, v5.4s, v0.4s\n"
- "str q11, [%[y_ptr], #0x30]\n"
- "fmla v30.4s, v6.4s, v0.4s\n"
- "str q12, [%[y_ptr], #0x40]\n"
- "fmla v31.4s, v7.4s, v0.4s\n"
-
- "str q13, [%[y_ptr], #0x50]\n"
- "str q14, [%[y_ptr], #0x60]\n"
- "str q15, [%[y_ptr], #0x70]\n"
- "str q16, [%[y_ptr], #0x80]\n"
- "str q17, [%[y_ptr], #0x90]\n"
- "str q18, [%[y_ptr], #0xa0]\n"
- "str q19, [%[y_ptr], #0xb0]\n"
- "str q20, [%[y_ptr], #0xc0]\n"
- "str q21, [%[y_ptr], #0xd0]\n"
- "str q22, [%[y_ptr], #0xe0]\n"
- "str q23, [%[y_ptr], #0xf0]\n"
- "str q24, [%[y_ptr], #0x100]\n"
- "str q25, [%[y_ptr], #0x110]\n"
- "str q26, [%[y_ptr], #0x120]\n"
- "str q27, [%[y_ptr], #0x130]\n"
-
- "12:\n"
- "stp q28, q29, [%[y_ptr], #0x140]\n"
- "stp q30, q31, [%[y_ptr], #0x160]\n"
- "add %[y_ptr], %[y_ptr], #0x180\n"
-
-
-
- : [a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr), [y_ptr] "+r" (y_ptr), [k] "+r" (k), [pf_ptr] "+r" (pf_ptr), [firstpf_ptr] "+r" (firstpf_ptr)
- : [jump] "r" (jump), [vb] "w" (vb), [pf_limit] "r" (pf_limit), [beta0] "r" (beta0)
- : "w0", "v0", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13",
- "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
- "v27", "v28", "v29", "v30", "v31", "cc"
- );
- }
-
- if (N>0) {
- // Handle N tail - up to 95 stragglers.
- // This is 0-23 vectors, plus optionally a 64-bit vector and/or a
- // single value for the remainder.
-
- // Independent pointers into the matrix for the odd 2 and odd 1.
- // Double up as flag to indicate whether they are needed.
- const float *odd2_aptr=NULL;
- const float *odd1_aptr=NULL;
-
- // Figure out how much work we need to do.
- int numvecs = N/4;
- int rem = N%4;
- int k=M;
-
- // Set up pointers for the odd 2/1 if needed.
- if (rem >= 2) {
- odd2_aptr = a_ptr_base + (numvecs * 4);
- }
-
- if (rem & 1) {
- odd1_aptr = a_ptr_base + (numvecs * 4) + (odd2_aptr==NULL ? 0 : 2);
- }
-
- const float *a_ptr = a_ptr_base;
- const float *firstpf_ptr = a_ptr_base;
- const float *pf_ptr = a_ptr_base;
- const float *pf_limit = a_ptr + (M * lda);
-
- const float *x_ptr = Xstart;
- int vecs=0; // Working variable to count how many vectors to work on.
- int dopf=1; // Track whether we are doing prefetches.
-
- // Figure out how many cache lines we need to prefetch each time.
- int numpfs = (N + 15) / 16;
-
- // Do initial prefetches
- for (int i=0; i<firstpfd+1; i++) {
- prefetch_1x(firstpf_ptr);
- firstpf_ptr += lda;
- }
-
- // Do "main" prefetches - adapt number to the number we actually need.
- if (numpfs > 1) {
- for (int i=0; i<pfd+1; i++) {
- switch (numpfs) {
- case 2:
- prefetch_1x(pf_ptr + 16);
- break;
-
- case 3:
- prefetch_2x(pf_ptr + 16);
- break;
-
- case 4:
- prefetch_3x(pf_ptr + 16);
- break;
-
- case 5:
- prefetch_4x(pf_ptr + 16);
- break;
-
- case 6:
- prefetch_5x(pf_ptr + 16);
- break;
-
- default:
- UNREACHABLE("Impossible.");
- }
- pf_ptr += lda;
- }
- } else {
- // Just disable additional prefetches
- dopf=0;
- }
-
- // Do the real work
- __asm __volatile (
- // Initialize all the vectors - not worth skipping this if only
- // some are needed.
- "movi v8.4s,#0x0\n"
- "ldr w0, [%[x_ptr]]\n"
- "movi v9.4s,#0x0\n"
- "movi v10.4s,#0x0\n"
- "movi v11.4s,#0x0\n"
- "movi v12.4s,#0x0\n"
- "movi v13.4s,#0x0\n"
- "movi v14.4s,#0x0\n"
- "movi v15.4s,#0x0\n"
- "movi v16.4s, #0x0\n"
- "movi v17.4s, #0x0\n"
- "movi v18.4s, #0x0\n"
- "movi v19.4s, #0x0\n"
- "movi v20.4s, #0x0\n"
- "movi v21.4s, #0x0\n"
- "movi v22.4s, #0x0\n"
- "movi v23.4s, #0x0\n"
- "movi v24.4s, #0x0\n"
- "movi v25.4s, #0x0\n"
- "movi v26.4s, #0x0\n"
- "movi v27.4s, #0x0\n"
- "movi v28.4s, #0x0\n"
- "movi v29.4s, #0x0\n"
- "movi v30.4s, #0x0\n"
- "movi v6.2s, #0x0\n"
- "movi v5.2s, #0x0\n"
-
- "1:\n"
- ASM_PREFETCH("[%[firstpf_ptr]]\n")
- "11:\n"
- "dup v0.4s, w0\n"
- "ldr w0, [%[x_ptr], #4]\n"
- "add %[x_ptr], %[x_ptr], #4\n"
-
- "cbz %w[numvecs], 2f\n"
- "mov %w[vecs], %w[numvecs]\n"
-
- // Vector 0
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x00]\n"
- "fmla v8.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 1
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x10]\n"
- "fmla v9.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 2
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x20]\n"
- "fmla v10.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 3
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x30]\n"
- "fmla v11.4s, v7.4s, v0.4s\n"
- // Prefetch
- "cbz %w[dopf], 3f\n"
- ASM_PREFETCH("[%[pf_ptr], #0x40]")
- "3:\n"
- "beq 2f\n"
-
- // Vector 4
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x40]\n"
- "fmla v12.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 5
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x50]\n"
- "fmla v13.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 6
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x60]\n"
- "fmla v14.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 7
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x70]\n"
- "fmla v15.4s, v7.4s, v0.4s\n"
- // Prefetch
- "cbz %w[dopf], 4f\n"
- ASM_PREFETCH("[%[pf_ptr], #0x80]")
- "4:\n"
- "beq 2f\n"
-
- // Vector 8
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x80]\n"
- "fmla v16.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 9
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x90]\n"
- "fmla v17.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 10
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xa0]\n"
- "fmla v18.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 11
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xb0]\n"
- "fmla v19.4s, v7.4s, v0.4s\n"
- // Prefetch
- "cbz %w[dopf], 5f\n"
- ASM_PREFETCH("[%[pf_ptr], #0xc0]")
- "5:\n"
- "beq 2f\n"
-
- // Vector 12
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xc0]\n"
- "fmla v20.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 13
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xd0]\n"
- "fmla v21.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 14
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xe0]\n"
- "fmla v22.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 15
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0xf0]\n"
- "fmla v23.4s, v7.4s, v0.4s\n"
- // Prefetch
- "cbz %w[dopf], 6f\n"
- ASM_PREFETCH("[%[pf_ptr], #0x100]")
- "6:\n"
- "beq 2f\n"
-
- // Vector 16
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x100]\n"
- "fmla v24.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 17
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x110]\n"
- "fmla v25.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 18
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x120]\n"
- "fmla v26.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 19
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x130]\n"
- "fmla v27.4s, v7.4s, v0.4s\n"
- // Prefetch
- "cbz %w[dopf], 7f\n"
- ASM_PREFETCH("[%[pf_ptr], #0x140]")
- "7:\n"
- "beq 2f\n"
-
- // Vector 20
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x140]\n"
- "fmla v28.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 21
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x150]\n"
- "fmla v29.4s, v7.4s, v0.4s\n"
- "beq 2f\n"
- // Vector 22
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7,[%[a_ptr], #0x160]\n"
- "fmla v30.4s, v7.4s, v0.4s\n"
-
- "2:\n"
- "add %[a_ptr], %[a_ptr], %[jump]\n"
-
- // Do the odd 2-vector, if needed
- "cbz %[odd2_aptr], 8f\n"
- "ldr d7, [%[odd2_aptr]]\n"
- "fmla v6.2s, v7.2s, v0.2s\n"
- "add %[odd2_aptr], %[odd2_aptr], %[jump]\n"
-
- "8:\n"
- // Do the odd 1-vector, if needed
- "cbz %[odd1_aptr], 9f\n"
- "ldr s7, [%[odd1_aptr]]\n"
- "fmla v5.2s, v7.2s, v0.2s\n"
- "add %[odd1_aptr], %[odd1_aptr], %[jump]\n"
-
- // Get out if needed.
- "9:\n"
- "subs %w[k], %w[k], #1\n"
- "beq 10f\n"
-
- // Update the "main" prefetch pointer, if it strays beyond the limit turn off "dopf"
- "add %[pf_ptr], %[pf_ptr], %[jump]\n"
- "cmp %[pf_ptr], %[pf_limit]\n"
- "csel %w[dopf], %w[dopf], WZR, LT\n"
-
- // Update the "leading" prefetch pointer, don't do the first
- // instruction of the loop if it's over the limit.
- "add %[firstpf_ptr], %[firstpf_ptr], %[jump]\n"
- "cmp %[firstpf_ptr], %[pf_limit]\n"
- "blt 1b\n"
- "b 11b\n"
-
- // Now write out the outputs
- "10:\n"
- "cbnz %w[beta0], 15f\n"
-
- "cbz %w[numvecs], 12f\n"
- "mov %w[vecs], %w[numvecs]\n"
-
- // Vector 0
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v8.4s, v7.4s, %[vb].4s\n"
- "str q8, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 1
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v9.4s, v7.4s, %[vb].4s\n"
- "str q9, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 2
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v10.4s, v7.4s, %[vb].4s\n"
- "str q10, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 3
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v11.4s, v7.4s, %[vb].4s\n"
- "str q11, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 4
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v12.4s, v7.4s, %[vb].4s\n"
- "str q12, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 5
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v13.4s, v7.4s, %[vb].4s\n"
- "str q13, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 6
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v14.4s, v7.4s, %[vb].4s\n"
- "str q14, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 7
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v15.4s, v7.4s, %[vb].4s\n"
- "str q15, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 8
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v16.4s, v7.4s, %[vb].4s\n"
- "str q16, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 9
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v17.4s, v7.4s, %[vb].4s\n"
- "str q17, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 10
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v18.4s, v7.4s, %[vb].4s\n"
- "str q18, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 11
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v19.4s, v7.4s, %[vb].4s\n"
- "str q19, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 12
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v20.4s, v7.4s, %[vb].4s\n"
- "str q20, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 13
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v21.4s, v7.4s, %[vb].4s\n"
- "str q21, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 14
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v22.4s, v7.4s, %[vb].4s\n"
- "str q22, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 15
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v23.4s, v7.4s, %[vb].4s\n"
- "str q23, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 16
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v24.4s, v7.4s, %[vb].4s\n"
- "str q24, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 17
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v25.4s, v7.4s, %[vb].4s\n"
- "str q25, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 18
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v26.4s, v7.4s, %[vb].4s\n"
- "str q26, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 19
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v27.4s, v7.4s, %[vb].4s\n"
- "str q27, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 20
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v28.4s, v7.4s, %[vb].4s\n"
- "str q28, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 21
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v29.4s, v7.4s, %[vb].4s\n"
- "str q29, [%[y_ptr]], #0x10\n"
- "beq 12f\n"
- // Vector 22
- "subs %w[vecs], %w[vecs], #1\n"
- "ldr q7, [%[y_ptr]]\n"
- "fmla v30.4s, v7.4s, %[vb].4s\n"
- "str q30, [%[y_ptr]], #0x10\n"
-
- // Odd 2
- "12:\n"
- "cbz %[odd2_aptr], 13f\n"
- "ldr d7, [%[y_ptr]]\n"
- "fmla v6.2s, v7.2s, %[vb].2s\n"
- "str d6, [%[y_ptr]], #0x8\n"
-
- // Odd 1
- "13:\n"
- "cbz %[odd1_aptr], 14f\n"
- "ldr s7, [%[y_ptr]]\n"
- "fmla v5.2s, v7.2s, %[vb].2s\n"
- "str s5, [%[y_ptr]]\n"
- "b 14f\n"
-
- "15:\n"
- // beta0 code
- "cbz %w[numvecs], 16f\n"
- "mov %w[vecs], %w[numvecs]\n"
-
- // Vector 0
- "subs %w[vecs], %w[vecs], #1\n"
- "str q8, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 1
- "subs %w[vecs], %w[vecs], #1\n"
- "str q9, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 2
- "subs %w[vecs], %w[vecs], #1\n"
- "str q10, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 3
- "subs %w[vecs], %w[vecs], #1\n"
- "str q11, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 4
- "subs %w[vecs], %w[vecs], #1\n"
- "str q12, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 5
- "subs %w[vecs], %w[vecs], #1\n"
- "str q13, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 6
- "subs %w[vecs], %w[vecs], #1\n"
- "str q14, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 7
- "subs %w[vecs], %w[vecs], #1\n"
- "str q15, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 8
- "subs %w[vecs], %w[vecs], #1\n"
- "str q16, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 9
- "subs %w[vecs], %w[vecs], #1\n"
- "str q17, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 10
- "subs %w[vecs], %w[vecs], #1\n"
- "str q18, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 11
- "subs %w[vecs], %w[vecs], #1\n"
- "str q19, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 12
- "subs %w[vecs], %w[vecs], #1\n"
- "str q20, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 13
- "subs %w[vecs], %w[vecs], #1\n"
- "str q21, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 14
- "subs %w[vecs], %w[vecs], #1\n"
- "str q22, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 15
- "subs %w[vecs], %w[vecs], #1\n"
- "str q23, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 16
- "subs %w[vecs], %w[vecs], #1\n"
- "str q24, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 17
- "subs %w[vecs], %w[vecs], #1\n"
- "str q25, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 18
- "subs %w[vecs], %w[vecs], #1\n"
- "str q26, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 19
- "subs %w[vecs], %w[vecs], #1\n"
- "str q27, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 20
- "subs %w[vecs], %w[vecs], #1\n"
- "str q28, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 21
- "subs %w[vecs], %w[vecs], #1\n"
- "str q29, [%[y_ptr]], #0x10\n"
- "beq 16f\n"
- // Vector 22
- "subs %w[vecs], %w[vecs], #1\n"
- "str q30, [%[y_ptr]], #0x10\n"
-
- // Odd 2
- "16:\n"
- "cbz %[odd2_aptr], 17f\n"
- "str d6, [%[y_ptr]], #0x8\n"
-
- // Odd 1
- "17:\n"
- "cbz %[odd1_aptr], 14f\n"
- "str s5, [%[y_ptr]]\n"
-
- "14:\n"
- : [a_ptr] "+r" (a_ptr), [x_ptr] "+r" (x_ptr), [y_ptr] "+r" (y_ptr), [k] "+r" (k),
- [pf_ptr] "+r" (pf_ptr), [firstpf_ptr] "+r" (firstpf_ptr),
- [odd1_aptr] "+r" (odd1_aptr), [odd2_aptr] "+r" (odd2_aptr),
- [dopf] "+r" (dopf), [vecs] "+r" (vecs)
- : [jump] "r" (jump), [vb] "w" (vb), [pf_limit] "r" (pf_limit), [numvecs] "r" (numvecs), [beta0] "r" (beta0)
- : "w0", "v0", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13",
- "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
- "v27", "v28", "v29", "v30", "v31", "cc"
- );
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __aarch64__
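For orientation, the removed inner loop above broadcasts one element of the x vector per iteration, scales a row of A into up to 23 four-lane accumulators (plus 2- and 1-element tails), and at writeback either stores the result directly (the beta0 path) or blends it with the existing y values scaled by beta. A minimal scalar sketch of that computation follows; the function name, row-major layout and parameter set are illustrative assumptions, not the library's API.

#include <cstddef>

// Scalar sketch of the deleted transposed-GEMV loop: each k iteration
// broadcasts x[k] and accumulates a scaled row of A into the output
// accumulators; the writeback is either a plain store (beta == 0) or a
// blend with beta * existing y, as in the "15:" vs. fmla-with-%[vb] paths.
void gemv_transposed_ref(const float *A, const float *x, float *y,
                         size_t K, size_t N, size_t lda,
                         float beta, bool beta0) {
    for (size_t j = 0; j < N; ++j) {
        float acc = 0.0f;                       // plays the role of v8..v30 / v6 / v5
        for (size_t k = 0; k < K; ++k) {
            acc += A[k * lda + j] * x[k];       // fmla vN.4s, v7.4s, v0.4s
        }
        y[j] = beta0 ? acc : acc + beta * y[j]; // beta0 store vs. accumulate-into-y path
    }
}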
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp
index 477f3005e6..6f31efe6cb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
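The hunks in this and the following headers only rename the static capability query from supports_append() to supports_accumulate(); its value is unchanged. As a rough, assumption-laden sketch (the run_kernel wrapper below is hypothetical, not part of the library), such a constexpr member lets a caller branch at compile time on whether a strategy can add onto existing output:

#include <cstdint>

// Hypothetical consumer of the renamed capability query. Because the
// kernel classes expose it as static constexpr, the branch is resolved
// at compile time rather than carried as a runtime flag.
template <typename Strategy>
void run_kernel(bool accumulate) {
    if constexpr (Strategy::supports_accumulate()) {
        // Strategy can accumulate into C; forward the flag to the kernel.
    } else {
        // Strategy cannot accumulate in place; the caller must merge the
        // partial results itself (or reject this configuration).
    }
    (void)accumulate; // placeholder: a real wrapper would pass operands through
}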
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp
index 1a0358b787..e9a094855a 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -55,7 +55,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6.hpp
index fcb188a247..fc087b73db 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x6.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x8.hpp
index bfe896b943..3de708cc68 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_s8s32_dot_4x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x6.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x6.hpp
index d817b9f47d..76931db4dd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x6.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x6.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x8.hpp
index b825333a30..d91416c3be 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_smallK_hybrid_u8u32_dot_4x8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp
index efc109fb34..eba98bb74d 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 2;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4/generic.cpp
index f16f452739..385a16fe10 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = ((K + 1) / 2) * 2;
const long loops_count = ((K + 8) / 16) - 1;
K -= loops_count * 16;
@@ -41,7 +41,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
const long leftovers = K;
const long blocks_count = (K + 1) / 2;
float nullbias[256];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -102,7 +102,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -398,7 +398,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -417,7 +417,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -859,7 +859,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -882,7 +882,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -1470,7 +1470,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1498,7 +1498,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -2232,7 +2232,7 @@ void sve_hybrid_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
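For readers skimming the rename in the generic.cpp hunks above, a minimal scalar sketch of what the accumulate flag (formerly append) controls: when it is clear, the accumulators are seeded from the bias (or a zeroed null bias, as in the "if (!accumulate && !bias) memset(...)" change); when it is set, the bias load is skipped ("cbnz %[accumulate], 1f") and the kernel adds onto the existing C contents. The code below is illustrative only and mirrors just the control flow, with a simplified dense layout and no activation clamping.

#include <vector>

// Illustrative-only sketch of the 'accumulate' semantics in the hybrid
// kernels touched above; the real kernels use predicated SVE loads and a
// separate output stride, which this scalar version deliberately omits.
void hybrid_block_ref(const float *A, const float *B, float *C,
                      int M, int N, int K,
                      const float *bias, bool accumulate) {
    std::vector<float> nullbias;
    if (!accumulate && !bias) {          // mirrors the nullbias fallback in the diff
        nullbias.assign(static_cast<size_t>(N), 0.0f);
        bias = nullbias.data();
    }
    for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
            // accumulate == false: seed from bias; accumulate == true: keep existing C.
            float acc = accumulate ? C[m * N + n] : bias[n];
            for (int k = 0; k < K; ++k) {
                acc += A[m * K + k] * B[k * N + n];
            }
            C[m * N + n] = acc;
        }
    }
}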
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp
index 551c6f3a8c..641e5c12fd 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4/generic.cpp
index 4b67d747e2..76e3546c6f 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 8) / 16) - 1;
K -= loops_count * 16;
@@ -41,7 +41,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
const long leftovers = K;
const long blocks_count = (K + 3) / 4;
float nullbias[128];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (2 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -98,7 +98,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z1.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -298,7 +298,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"st1w z1.s, p1, [%[c_ptr0], #1, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -313,7 +313,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -524,7 +524,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -543,7 +543,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z3.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -869,7 +869,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -892,7 +892,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -1229,7 +1229,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -1256,7 +1256,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z5.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -1708,7 +1708,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr3\n"
".unreq c_ptr4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1739,7 +1739,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -2202,7 +2202,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr4\n"
".unreq c_ptr5\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
@@ -2237,7 +2237,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z7.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -2815,7 +2815,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr5\n"
".unreq c_ptr6\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "cc", "memory"
);
break;
@@ -2855,7 +2855,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
"incw %[temp], all, mul #1\n"
"ptrue p7.h\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -3444,7 +3444,7 @@ void sve_hybrid_bf16fp32_mmla_4VLx4(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr6\n"
".unreq c_ptr7\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp
index 6f26fd1404..bd457e9d27 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2.hpp
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2/generic.cpp
index fb943fe6fe..59dc6dc540 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_6VLx2/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 8) / 16) - 1;
K -= loops_count * 16;
@@ -41,7 +41,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
const long leftovers = K;
const long blocks_count = (K + 3) / 4;
float nullbias[192];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (3 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -100,7 +100,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p1.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z1.h, #0\n"
"ld1w z19.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -373,7 +373,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
"st1w z2.s, p2, [%[c_ptr0], #2, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -390,7 +390,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p1.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z19.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -676,7 +676,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -697,7 +697,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p1.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z3.h, #0\n"
"ld1w z19.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -1138,7 +1138,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1164,7 +1164,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p1.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p2.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z19.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -1618,7 +1618,7 @@ void sve_hybrid_bf16fp32_mmla_6VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2.hpp
index 0bf4492fdc..f25f7473cb 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2.hpp
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2/generic.cpp
index 3f201f0656..f38a2ea2e3 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_mmla_8VLx2/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 8) / 16) - 1;
K -= loops_count * 16;
@@ -41,7 +41,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
const long leftovers = K;
const long blocks_count = (K + 3) / 4;
float nullbias[256];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -102,7 +102,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z1.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -443,7 +443,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
"st1w z3.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -462,7 +462,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -818,7 +818,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -841,7 +841,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z3.h, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
@@ -1392,7 +1392,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1420,7 +1420,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -1986,7 +1986,7 @@ void sve_hybrid_bf16fp32_mmla_8VLx2(const bfloat16 *A, int lda, const bfloat16 *
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4.hpp
index fb27b7e103..ebef413848 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4/generic.cpp
index 3aef916ad2..7610a20ac0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16 *C, int ldc, int M, int N, int K, const __fp16 *bias, Activation act, bool append) {
+void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16 *C, int ldc, int M, int N, int K, const __fp16 *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 8) / 16) - 1;
K -= loops_count * 16;
@@ -40,7 +40,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
K -= (regs_count + 1) * 8;
const long leftovers = K;
__fp16 nullbias[512];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (4 * get_vector_length<__fp16>() * sizeof(__fp16)));
}
__fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
@@ -101,7 +101,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
"whilelt p2.h, %[temp], %[width]\n"
"inch %[temp], all, mul #1\n"
"whilelt p3.h, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1h z16.h, p0/z, [%[biasptr]]\n"
"ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -624,7 +624,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
"st1h z19.h, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -643,7 +643,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
"whilelt p2.h, %[temp], %[width]\n"
"inch %[temp], all, mul #1\n"
"whilelt p3.h, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1h z16.h, p0/z, [%[biasptr]]\n"
"ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -1416,7 +1416,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -1439,7 +1439,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
"whilelt p2.h, %[temp], %[width]\n"
"inch %[temp], all, mul #1\n"
"whilelt p3.h, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1h z16.h, p0/z, [%[biasptr]]\n"
"ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -2462,7 +2462,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -2490,7 +2490,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
"whilelt p2.h, %[temp], %[width]\n"
"inch %[temp], all, mul #1\n"
"whilelt p3.h, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1h z16.h, p0/z, [%[biasptr]]\n"
"ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -3763,7 +3763,7 @@ void sve_hybrid_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, __fp16
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4.hpp
index 28e00305f7..1bc8021e76 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
index 6b55959e2a..ce3624340e 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = K;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -40,7 +40,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
K -= (regs_count + 1) * 4;
const long leftovers = K;
float nullbias[256];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -101,7 +101,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -377,7 +377,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -396,7 +396,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -810,7 +810,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -833,7 +833,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -1385,7 +1385,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1413,7 +1413,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z16.s, p0/z, [%[biasptr]]\n"
"ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
"ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
@@ -2103,7 +2103,7 @@ void sve_hybrid_fp32_mla_4VLx4(const float *A, int lda, const float *B, float *C
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
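For the hybrid float kernels above, the renamed flag changes only naming, not behaviour: with accumulate == false the "cbnz %[accumulate], 1f" branch is not taken and the output tile is initialised from biasptr (or a zeroed nullbias), while with accumulate == true the kernel adds onto whatever is already in C. A scalar reference of that contract is sketched below; it assumes row-major A, B and C for illustration and omits the min/max activation clamp the real kernel applies before storing.

// Scalar reference of the accumulate/bias contract, not the SVE implementation.
void reference_gemm(const float *A, int lda, const float *B, float *C, int ldc,
                    int M, int N, int K, const float *bias, bool accumulate) {
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            // accumulate: start from the existing C value; otherwise from bias (or 0).
            float acc = accumulate ? C[m * ldc + n] : (bias ? bias[n] : 0.0f);
            for (int k = 0; k < K; k++) {
                acc += A[m * lda + k] * B[k * N + n];  // B assumed row-major here
            }
            C[m * ldc + n] = acc;
        }
    }
}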
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4.hpp
index 4bdf4e1d80..fd416ed2f4 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 2;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4/generic.cpp
index d8ed307c4b..1364585604 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mmla_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
+void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool accumulate) {
const int K_stride = ((K + 1) / 2) * 2;
const long loops_count = ((K + 4) / 8) - 1;
K -= loops_count * 8;
@@ -41,7 +41,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
const long leftovers = K;
const long blocks_count = (K + 1) / 2;
float nullbias[128];
- if (!append && !bias) {
+ if (!accumulate && !bias) {
memset(nullbias, 0, (2 * get_vector_length<float>() * sizeof(float)));
}
float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
@@ -98,7 +98,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z1.s, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
@@ -298,7 +298,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"st1w z1.s, p1, [%[c_ptr0], #1, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -313,7 +313,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -524,7 +524,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -543,7 +543,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z3.s, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
@@ -869,7 +869,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -892,7 +892,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -1229,7 +1229,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
@@ -1256,7 +1256,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z5.s, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
@@ -1708,7 +1708,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr3\n"
".unreq c_ptr4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "cc", "memory"
);
break;
@@ -1739,7 +1739,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -2202,7 +2202,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr4\n"
".unreq c_ptr5\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "cc", "memory"
);
break;
@@ -2237,7 +2237,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z7.s, #0\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
@@ -2815,7 +2815,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr5\n"
".unreq c_ptr6\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "cc", "memory"
);
break;
@@ -2855,7 +2855,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
"incw %[temp], all, mul #1\n"
"ptrue p7.s\n"
"whilelt p1.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"ld1w z15.s, p0/z, [%[biasptr]]\n"
"ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
"add %[a_ptr0], %[a_ptr0], #0x10\n"
@@ -3444,7 +3444,7 @@ void sve_hybrid_fp32_mmla_4VLx4(const float *A, int lda, const float *B, float *
".unreq c_ptr6\n"
".unreq c_ptr7\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "cc", "memory"
);
break;
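The context lines of this file also show the nullbias pattern shared by the float hybrid kernels: when the caller neither accumulates nor supplies a bias, biasptr is pointed at a zeroed scratch buffer, so a single "load biasptr into the tile" path covers both C = A*B + bias and plain C = A*B. The sketch below is an illustration, not ComputeLibrary code; get_vector_length<float>() belongs to arm_gemm, so a fixed stand-in width is used to keep it self-contained, and tile/bias are assumed to hold at least 2 * kVecLenF32 floats.

#include <cstring>

constexpr unsigned int kVecLenF32 = 4;  // stand-in for get_vector_length<float>()

void init_tile_from_bias(float *tile, const float *bias, bool accumulate) {
    float nullbias[2 * kVecLenF32];
    if (!accumulate && !bias) {
        std::memset(nullbias, 0, sizeof(nullbias));  // zero scratch bias
    }
    const float *biasptr = bias ? bias : nullbias;
    if (!accumulate) {
        // The real kernel does this with predicated ld1w loads into z registers.
        std::memcpy(tile, biasptr, sizeof(nullbias));
    }
}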
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4.hpp
index 230a2cf19f..c500f43fe0 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4/generic.cpp
index 46fc500476..b30b8845a6 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool append) {
+void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -80,7 +80,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -366,7 +366,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -385,7 +385,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -809,7 +809,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -832,7 +832,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -1394,7 +1394,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1422,7 +1422,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -2122,7 +2122,7 @@ void sve_hybrid_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int32
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
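The integer hybrid kernels differ from the float ones in that their bias and Activation parameters are unnamed and unused, and accumulate == false simply zero-initialises the int32 accumulators ("mov z16.s, #0" and friends) instead of loading a bias. A scalar reference of that contract is sketched below for the s8 -> s32 case; it is an illustration only, with B assumed row-major with leading dimension N.

#include <cstdint>

// Scalar reference of the s8s32 dot kernel's contract, not the SVE code.
void reference_s8s32_gemm(const int8_t *A, int lda, const int8_t *B,
                          int32_t *C, int ldc, int M, int N, int K,
                          bool accumulate) {
    for (int m = 0; m < M; m++) {
        for (int n = 0; n < N; n++) {
            int32_t acc = accumulate ? C[m * ldc + n] : 0;  // no bias path
            for (int k = 0; k < K; k++) {
                acc += static_cast<int32_t>(A[m * lda + k]) *
                       static_cast<int32_t>(B[k * N + n]);
            }
            C[m * ldc + n] = acc;  // no activation applied for s8s32/u8u32
        }
    }
}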
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4.hpp
index f829fb0205..c325e522d7 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4.hpp
@@ -58,7 +58,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return true;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4/generic.cpp
index 13614700e3..565832e8de 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4/generic.cpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_4VLx4/generic.cpp
@@ -32,7 +32,7 @@
namespace arm_gemm {
-void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool append) {
+void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool accumulate) {
const int K_stride = ((K + 3) / 4) * 4;
const long loops_count = ((K + 16) / 32) - 1;
K -= loops_count * 32;
@@ -80,7 +80,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -366,7 +366,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
"st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
"addvl %[c_ptr0], %[c_ptr0], #4\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
);
break;
@@ -385,7 +385,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -809,7 +809,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
".unreq a_ptr1\n"
".unreq c_ptr1\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
);
break;
@@ -832,7 +832,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -1394,7 +1394,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
".unreq c_ptr1\n"
".unreq c_ptr2\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
);
break;
@@ -1422,7 +1422,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
"whilelt p2.s, %[temp], %[width]\n"
"incw %[temp], all, mul #1\n"
"whilelt p3.s, %[temp], %[width]\n"
- "cbnz %[append], 1f\n"
+ "cbnz %[accumulate], 1f\n"
"mov z16.s, #0\n"
"ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
"mov z17.s, #0\n"
@@ -2122,7 +2122,7 @@ void sve_hybrid_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, uin
".unreq c_ptr2\n"
".unreq c_ptr3\n"
: [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
+ : [width] "r" (width), [accumulate] "r" (static_cast<uint64_t>(accumulate)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers)
: "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
);
break;
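The next two file diffs delete one of the "native" SVE kernels outright rather than renaming a flag: native kernels read B in place, so their signature carries an ldb stride and the assembly interleaves columns with ld1h/zip1/zip2 on the fly, whereas the hybrid kernels kept above take B already packed and have no ldb. The contrast is sketched below using the two signatures visible in this diff; bfloat16 and Activation are arm_gemm types, so hypothetical stand-in aliases are used to keep the snippet self-contained.

#include <cstdint>

using bfloat16_t_standin = uint16_t;  // stand-in for arm_gemm's bfloat16
enum class Activation_standin {};     // stand-in for arm_gemm::Activation

// Deleted native kernel: the B pointer is followed by its leading dimension.
using native_kern_type = void (*)(const bfloat16_t_standin *, int,
                                  const bfloat16_t_standin *, int /*ldb*/,
                                  float *, int, int, int, int, const float *,
                                  Activation_standin, bool /*accumulate*/);

// Retained hybrid kernel shape (e.g. sve_hybrid_fp32_mla_4VLx4): no ldb, B packed.
using hybrid_kern_type = void (*)(const float *, int, const float *, float *, int,
                                  int, int, int, const float *,
                                  Activation_standin, bool /*accumulate*/);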
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4.hpp
deleted file mode 100644
index 6738809934..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-#include "../bfloat.hpp"
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void sve_native_bf16fp32_dot_4VLx4(const bfloat16 *, int, const bfloat16 *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
-class native_bf16fp32_dot_4VLx4
-{
-public:
- typedef bfloat16 operand_type;
- typedef float result_type;
-
- typedef void (*kern_type)(const bfloat16 *, int, const bfloat16 *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return get_vector_length<float>() * 4;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 2;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return true;
- }
-
- static constexpr bool supports_activation()
- {
- return true;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=sve_native_bf16fp32_dot_4VLx4;
-
- native_bf16fp32_dot_4VLx4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4/generic.cpp
deleted file mode 100644
index d3bd89b8c5..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_bf16fp32_dot_4VLx4/generic.cpp
+++ /dev/null
@@ -1,3290 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_SVE
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-#include "../../bfloat.hpp"
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void sve_native_bf16fp32_dot_4VLx4(const bfloat16 *A, int lda, const bfloat16 *B, int ldb, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
- const long loops_count = ((K + 8) / 16) - 1;
- K -= loops_count * 16;
- const long regs_count = (K / 8) - 1;
- K -= (regs_count + 1) * 8;
- const long leftovers = K;
- const long blocks_count = K / 2;
- const long odds_count = K - (blocks_count * 2);
- float nullbias[256];
- if (!append && !bias) {
- memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
- }
- float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
- float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
- const float * const minptr = &minval;
- const float * const maxptr = &maxval;
-
- switch(act.type)
- {
- default:
- case Activation::Type::None:
- break;
- case Activation::Type::BoundedReLU:
- maxval = static_cast<float>(act.param1);
- /* fall through */
- case Activation::Type::ReLU:
- minval = 0.0f;
- break;
- }
-
- int rows_to_compute;
-
- for (int y=0; y<M; y+=rows_to_compute) {
- const bfloat16 * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(bfloat16);
-
- float *c_ptr0 = C + (y * ldc);
-
- rows_to_compute = M-y;
- if (rows_to_compute > 4) {
- if (rows_to_compute % 4) {
- rows_to_compute = 4 - 1;
- } else {
- rows_to_compute = 4;
- }
- }
-
- for (int x0=0; x0<N; x0+=(4 * get_vector_length<float>())) {
- const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<float>()));
- long loops = loops_count;
- long regs = regs_count;
- long temp = 0;
- long blocks = blocks_count;
- long odds = odds_count;
- const bfloat16 *a_ptr0 = a_ptr0_base;
- const bfloat16 *b_ptr0 = B + x0;
- const bfloat16 *b_ptr1 = b_ptr0 + ldb;
- long ldbb = ldb * sizeof(bfloat16) * 2;
- const unsigned long ldcb = ldc * sizeof(float);
- const float *biasptr = bias ? bias+x0 : nullbias;
-
- switch(rows_to_compute) {
- case 1:
- __asm __volatile (
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "whilelt p4.h, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "whilelt p5.h, %[temp], %[width]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "cbz %[regs], 3f\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "8:\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "9:\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "10:\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "11:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "b 7f\n"
- "3:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "cbz %[blocks], 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- "b.eq 13f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- "b.eq 14f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "15:\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- "b 7f\n"
- "14:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "16:\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- "b 7f\n"
- "13:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "17:\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- "b 7f\n"
- "12:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "18:\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- "7:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "whilelt p4.h, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "whilelt p5.h, %[temp], %[width]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "incw %[temp], all, mul #1\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z22.d, z18.d\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "mov z23.d, z19.d\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "cbz %[regs], 3f\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "8:\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "9:\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "10:\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "11:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "b 7f\n"
- "3:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "cbz %[blocks], 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- "b.eq 13f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- "b.eq 14f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "15:\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- "b 7f\n"
- "14:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "16:\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- "b 7f\n"
- "13:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "17:\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- "b 7f\n"
- "12:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "18:\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- "7:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
- );
- break;
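The repeated zip1/zip2 pairs in these bodies appear to build, on the fly, the two-deep bf16 blocking that BFDOT consumes: the vector loaded through b_ptr0 and the vector loaded through b_ptr1 (two adjacent K-rows of the B panel) are woven together element by element, so each lane holds the two K-values that feed one output column. A minimal sketch of that pairing for plain arrays, with an illustrative helper name (interleave_b_rows is not an arm_gemm function), might look like:

#include <cstddef>
#include <cstdint>
#include <vector>

// Weave two adjacent B rows into (row0[n], row1[n]) pairs, mirroring the
// combined effect of the zip1/zip2 instructions in the deleted kernel.
static std::vector<uint16_t> interleave_b_rows(const uint16_t *row0,
                                               const uint16_t *row1,
                                               size_t n) {
    std::vector<uint16_t> out;
    out.reserve(2 * n);
    for (size_t i = 0; i < n; ++i) {
        out.push_back(row0[i]);  // zip places the b_ptr0 element first...
        out.push_back(row1[i]);  // ...and the matching b_ptr1 element second
    }
    return out;
}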
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "whilelt p4.h, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "ld1rqh z2.h, p7/z, [a_ptr2]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "mov z25.d, z17.d\n"
- "whilelt p5.h, %[temp], %[width]\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "mov z22.d, z18.d\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "mov z26.d, z18.d\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "mov z23.d, z19.d\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z27.d, z19.d\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1rqh z2.h, p7/z, [a_ptr2, #-0x10]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "cbz %[regs], 3f\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "ld1rqh z2.h, p6/z, [a_ptr2, #0x10]\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "8:\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "9:\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "10:\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "11:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "b 7f\n"
- "3:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1rqh z6.h, p6/z, [a_ptr2]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "cbz %[blocks], 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "b.eq 13f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "b.eq 14f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "15:\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- "b 7f\n"
- "14:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "16:\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- "b 7f\n"
- "13:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "17:\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "b 7f\n"
- "12:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "18:\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "7:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "fmax z24.s, p7/m, z24.s, z14.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.s, p7/m, z25.s, z14.s\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.s, p7/m, z26.s, z14.s\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "fmin z24.s, p7/m, z24.s, z15.s\n"
- "fmin z25.s, p7/m, z25.s, z15.s\n"
- "fmax z27.s, p7/m, z27.s, z14.s\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.s, p7/m, z26.s, z15.s\n"
- "fmin z27.s, p7/m, z27.s, z15.s\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "whilelt p4.h, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "mov z28.d, z16.d\n"
- "ld1rqh z2.h, p7/z, [a_ptr2]\n"
- "ld1rqh z3.h, p7/z, [a_ptr3]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "mov z25.d, z17.d\n"
- "whilelt p5.h, %[temp], %[width]\n"
- "mov z29.d, z17.d\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z22.d, z18.d\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "mov z26.d, z18.d\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "mov z30.d, z18.d\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "mov z23.d, z19.d\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "mov z27.d, z19.d\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z31.d, z19.d\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x6463411c // bfdot z28.s, z8.h, z3.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z7.h, p7/z, [a_ptr3]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- ".inst 0x6463413d // bfdot z29.s, z9.h, z3.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- ".inst 0x6463415e // bfdot z30.s, z10.h, z3.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- ".inst 0x6463417f // bfdot z31.s, z11.h, z3.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646b419c // bfdot z28.s, z12.h, z3.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646b41bd // bfdot z29.s, z13.h, z3.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646b41de // bfdot z30.s, z14.h, z3.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x646b41ff // bfdot z31.s, z15.h, z3.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x6473411c // bfdot z28.s, z8.h, z3.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x6473413d // bfdot z29.s, z9.h, z3.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x6473415e // bfdot z30.s, z10.h, z3.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6473417f // bfdot z31.s, z11.h, z3.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647b419c // bfdot z28.s, z12.h, z3.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647b41bd // bfdot z29.s, z13.h, z3.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647b41de // bfdot z30.s, z14.h, z3.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "ld1rqh z2.h, p7/z, [a_ptr2, #-0x10]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x647b41ff // bfdot z31.s, z15.h, z3.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1rqh z3.h, p7/z, [a_ptr3, #-0x10]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- ".inst 0x6467411c // bfdot z28.s, z8.h, z7.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x6467413d // bfdot z29.s, z9.h, z7.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x6467415e // bfdot z30.s, z10.h, z7.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6467417f // bfdot z31.s, z11.h, z7.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646f419c // bfdot z28.s, z12.h, z7.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646f41bd // bfdot z29.s, z13.h, z7.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646f41de // bfdot z30.s, z14.h, z7.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x646f41ff // bfdot z31.s, z15.h, z7.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x6477411c // bfdot z28.s, z8.h, z7.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- ".inst 0x6477413d // bfdot z29.s, z9.h, z7.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x6477415e // bfdot z30.s, z10.h, z7.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6477417f // bfdot z31.s, z11.h, z7.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- ".inst 0x647f419c // bfdot z28.s, z12.h, z7.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- ".inst 0x647f41bd // bfdot z29.s, z13.h, z7.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- ".inst 0x647f41de // bfdot z30.s, z14.h, z7.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x647f41ff // bfdot z31.s, z15.h, z7.h[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- ".inst 0x6463411c // bfdot z28.s, z8.h, z3.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z7.h, p7/z, [a_ptr3]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x6463413d // bfdot z29.s, z9.h, z3.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- ".inst 0x6463415e // bfdot z30.s, z10.h, z3.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6463417f // bfdot z31.s, z11.h, z3.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646b419c // bfdot z28.s, z12.h, z3.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646b41bd // bfdot z29.s, z13.h, z3.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646b41de // bfdot z30.s, z14.h, z3.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x646b41ff // bfdot z31.s, z15.h, z3.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x6473411c // bfdot z28.s, z8.h, z3.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x6473413d // bfdot z29.s, z9.h, z3.h[2]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x6473415e // bfdot z30.s, z10.h, z3.h[2]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6473417f // bfdot z31.s, z11.h, z3.h[2]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647b419c // bfdot z28.s, z12.h, z3.h[3]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647b41bd // bfdot z29.s, z13.h, z3.h[3]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647b41de // bfdot z30.s, z14.h, z3.h[3]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- "ld1rqh z2.h, p6/z, [a_ptr2, #0x10]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- ".inst 0x647b41ff // bfdot z31.s, z15.h, z3.h[3]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- "ld1rqh z3.h, p6/z, [a_ptr3, #0x10]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- ".inst 0x6467411c // bfdot z28.s, z8.h, z7.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- "addvl a_ptr3, a_ptr3, #2\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x6467413d // bfdot z29.s, z9.h, z7.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x6467415e // bfdot z30.s, z10.h, z7.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6467417f // bfdot z31.s, z11.h, z7.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646f419c // bfdot z28.s, z12.h, z7.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646f41bd // bfdot z29.s, z13.h, z7.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646f41de // bfdot z30.s, z14.h, z7.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x646f41ff // bfdot z31.s, z15.h, z7.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x6477411c // bfdot z28.s, z8.h, z7.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x6477413d // bfdot z29.s, z9.h, z7.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x6477415e // bfdot z30.s, z10.h, z7.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- ".inst 0x6477417f // bfdot z31.s, z11.h, z7.h[2]\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- ".inst 0x647f419c // bfdot z28.s, z12.h, z7.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- ".inst 0x647f41bd // bfdot z29.s, z13.h, z7.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- ".inst 0x647f41de // bfdot z30.s, z14.h, z7.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- ".inst 0x647f41ff // bfdot z31.s, z15.h, z7.h[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- ".inst 0x6463411c // bfdot z28.s, z8.h, z3.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- ".inst 0x6463413d // bfdot z29.s, z9.h, z3.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- ".inst 0x6463415e // bfdot z30.s, z10.h, z3.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- ".inst 0x6463417f // bfdot z31.s, z11.h, z3.h[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646b419c // bfdot z28.s, z12.h, z3.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646b41bd // bfdot z29.s, z13.h, z3.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646b41de // bfdot z30.s, z14.h, z3.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- ".inst 0x646b41ff // bfdot z31.s, z15.h, z3.h[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x6473411c // bfdot z28.s, z8.h, z3.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x6473413d // bfdot z29.s, z9.h, z3.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x6473415e // bfdot z30.s, z10.h, z3.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- ".inst 0x6473417f // bfdot z31.s, z11.h, z3.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "8:\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647b419c // bfdot z28.s, z12.h, z3.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647b41bd // bfdot z29.s, z13.h, z3.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647b41de // bfdot z30.s, z14.h, z3.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- ".inst 0x647b41ff // bfdot z31.s, z15.h, z3.h[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "9:\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x6473411c // bfdot z28.s, z8.h, z3.h[2]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- ".inst 0x6473413d // bfdot z29.s, z9.h, z3.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x6473415e // bfdot z30.s, z10.h, z3.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- ".inst 0x6473417f // bfdot z31.s, z11.h, z3.h[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "10:\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646b419c // bfdot z28.s, z12.h, z3.h[1]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646b41bd // bfdot z29.s, z13.h, z3.h[1]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646b41de // bfdot z30.s, z14.h, z3.h[1]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- ".inst 0x646b41ff // bfdot z31.s, z15.h, z3.h[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "11:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- ".inst 0x6463411c // bfdot z28.s, z8.h, z3.h[0]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- ".inst 0x6463413d // bfdot z29.s, z9.h, z3.h[0]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- ".inst 0x6463415e // bfdot z30.s, z10.h, z3.h[0]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- ".inst 0x6463417f // bfdot z31.s, z11.h, z3.h[0]\n"
- "b 7f\n"
- "3:\n"
- ".inst 0x64604110 // bfdot z16.s, z8.h, z0.h[0]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64614114 // bfdot z20.s, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- ".inst 0x64624118 // bfdot z24.s, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- ".inst 0x6463411c // bfdot z28.s, z8.h, z3.h[0]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64604131 // bfdot z17.s, z9.h, z0.h[0]\n"
- "ld1rqh z6.h, p6/z, [a_ptr2]\n"
- ".inst 0x64614135 // bfdot z21.s, z9.h, z1.h[0]\n"
- "ld1rqh z7.h, p6/z, [a_ptr3]\n"
- ".inst 0x64624139 // bfdot z25.s, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- ".inst 0x6463413d // bfdot z29.s, z9.h, z3.h[0]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x64604152 // bfdot z18.s, z10.h, z0.h[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- ".inst 0x64614156 // bfdot z22.s, z10.h, z1.h[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- ".inst 0x6462415a // bfdot z26.s, z10.h, z2.h[0]\n"
- "addvl a_ptr3, a_ptr3, #1\n"
- ".inst 0x6463415e // bfdot z30.s, z10.h, z3.h[0]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x64604173 // bfdot z19.s, z11.h, z0.h[0]\n"
- ".inst 0x64614177 // bfdot z23.s, z11.h, z1.h[0]\n"
- ".inst 0x6462417b // bfdot z27.s, z11.h, z2.h[0]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- ".inst 0x6463417f // bfdot z31.s, z11.h, z3.h[0]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64684190 // bfdot z16.s, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- ".inst 0x64694194 // bfdot z20.s, z12.h, z1.h[1]\n"
- ".inst 0x646a4198 // bfdot z24.s, z12.h, z2.h[1]\n"
- ".inst 0x646b419c // bfdot z28.s, z12.h, z3.h[1]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x646841b1 // bfdot z17.s, z13.h, z0.h[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- ".inst 0x646941b5 // bfdot z21.s, z13.h, z1.h[1]\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x646a41b9 // bfdot z25.s, z13.h, z2.h[1]\n"
- ".inst 0x646b41bd // bfdot z29.s, z13.h, z3.h[1]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- ".inst 0x646841d2 // bfdot z18.s, z14.h, z0.h[1]\n"
- ".inst 0x646941d6 // bfdot z22.s, z14.h, z1.h[1]\n"
- ".inst 0x646a41da // bfdot z26.s, z14.h, z2.h[1]\n"
- ".inst 0x646b41de // bfdot z30.s, z14.h, z3.h[1]\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- ".inst 0x646841f3 // bfdot z19.s, z15.h, z0.h[1]\n"
- ".inst 0x646941f7 // bfdot z23.s, z15.h, z1.h[1]\n"
- ".inst 0x646a41fb // bfdot z27.s, z15.h, z2.h[1]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- ".inst 0x646b41ff // bfdot z31.s, z15.h, z3.h[1]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- ".inst 0x64704110 // bfdot z16.s, z8.h, z0.h[2]\n"
- ".inst 0x64714114 // bfdot z20.s, z8.h, z1.h[2]\n"
- ".inst 0x64724118 // bfdot z24.s, z8.h, z2.h[2]\n"
- ".inst 0x6473411c // bfdot z28.s, z8.h, z3.h[2]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- ".inst 0x64704131 // bfdot z17.s, z9.h, z0.h[2]\n"
- ".inst 0x64714135 // bfdot z21.s, z9.h, z1.h[2]\n"
- ".inst 0x64724139 // bfdot z25.s, z9.h, z2.h[2]\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x6473413d // bfdot z29.s, z9.h, z3.h[2]\n"
- ".inst 0x64704152 // bfdot z18.s, z10.h, z0.h[2]\n"
- ".inst 0x64714156 // bfdot z22.s, z10.h, z1.h[2]\n"
- ".inst 0x6472415a // bfdot z26.s, z10.h, z2.h[2]\n"
- ".inst 0x6473415e // bfdot z30.s, z10.h, z3.h[2]\n"
- ".inst 0x64704173 // bfdot z19.s, z11.h, z0.h[2]\n"
- ".inst 0x64714177 // bfdot z23.s, z11.h, z1.h[2]\n"
- ".inst 0x6472417b // bfdot z27.s, z11.h, z2.h[2]\n"
- ".inst 0x6473417f // bfdot z31.s, z11.h, z3.h[2]\n"
- ".inst 0x64784190 // bfdot z16.s, z12.h, z0.h[3]\n"
- ".inst 0x64794194 // bfdot z20.s, z12.h, z1.h[3]\n"
- ".inst 0x647a4198 // bfdot z24.s, z12.h, z2.h[3]\n"
- ".inst 0x647b419c // bfdot z28.s, z12.h, z3.h[3]\n"
- ".inst 0x647841b1 // bfdot z17.s, z13.h, z0.h[3]\n"
- ".inst 0x647941b5 // bfdot z21.s, z13.h, z1.h[3]\n"
- ".inst 0x647a41b9 // bfdot z25.s, z13.h, z2.h[3]\n"
- ".inst 0x647b41bd // bfdot z29.s, z13.h, z3.h[3]\n"
- ".inst 0x647841d2 // bfdot z18.s, z14.h, z0.h[3]\n"
- ".inst 0x647941d6 // bfdot z22.s, z14.h, z1.h[3]\n"
- ".inst 0x647a41da // bfdot z26.s, z14.h, z2.h[3]\n"
- ".inst 0x647b41de // bfdot z30.s, z14.h, z3.h[3]\n"
- ".inst 0x647841f3 // bfdot z19.s, z15.h, z0.h[3]\n"
- ".inst 0x647941f7 // bfdot z23.s, z15.h, z1.h[3]\n"
- ".inst 0x647a41fb // bfdot z27.s, z15.h, z2.h[3]\n"
- ".inst 0x647b41ff // bfdot z31.s, z15.h, z3.h[3]\n"
- "cbz %[blocks], 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- ".inst 0x6467411c // bfdot z28.s, z8.h, z7.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x6467413d // bfdot z29.s, z9.h, z7.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x6467415e // bfdot z30.s, z10.h, z7.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- ".inst 0x6467417f // bfdot z31.s, z11.h, z7.h[0]\n"
- "b.eq 13f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z14.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z8.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646f419c // bfdot z28.s, z12.h, z7.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646f41bd // bfdot z29.s, z13.h, z7.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646f41de // bfdot z30.s, z14.h, z7.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- ".inst 0x646f41ff // bfdot z31.s, z15.h, z7.h[1]\n"
- "b.eq 14f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z10.h, p4/z, [%[b_ptr1]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z12.h, p5/z, [%[b_ptr1], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x6477411c // bfdot z28.s, z8.h, z7.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- ".inst 0x6477413d // bfdot z29.s, z9.h, z7.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x6477415e // bfdot z30.s, z10.h, z7.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- ".inst 0x6477417f // bfdot z31.s, z11.h, z7.h[2]\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "15:\n"
- ".inst 0x647c4190 // bfdot z16.s, z12.h, z4.h[3]\n"
- ".inst 0x647d4194 // bfdot z20.s, z12.h, z5.h[3]\n"
- ".inst 0x647e4198 // bfdot z24.s, z12.h, z6.h[3]\n"
- ".inst 0x647f419c // bfdot z28.s, z12.h, z7.h[3]\n"
- ".inst 0x647c41b1 // bfdot z17.s, z13.h, z4.h[3]\n"
- ".inst 0x647d41b5 // bfdot z21.s, z13.h, z5.h[3]\n"
- ".inst 0x647e41b9 // bfdot z25.s, z13.h, z6.h[3]\n"
- ".inst 0x647f41bd // bfdot z29.s, z13.h, z7.h[3]\n"
- ".inst 0x647c41d2 // bfdot z18.s, z14.h, z4.h[3]\n"
- ".inst 0x647d41d6 // bfdot z22.s, z14.h, z5.h[3]\n"
- ".inst 0x647e41da // bfdot z26.s, z14.h, z6.h[3]\n"
- ".inst 0x647f41de // bfdot z30.s, z14.h, z7.h[3]\n"
- ".inst 0x647c41f3 // bfdot z19.s, z15.h, z4.h[3]\n"
- ".inst 0x647d41f7 // bfdot z23.s, z15.h, z5.h[3]\n"
- ".inst 0x647e41fb // bfdot z27.s, z15.h, z6.h[3]\n"
- ".inst 0x647f41ff // bfdot z31.s, z15.h, z7.h[3]\n"
- "b 7f\n"
- "14:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "16:\n"
- ".inst 0x64744110 // bfdot z16.s, z8.h, z4.h[2]\n"
- ".inst 0x64754114 // bfdot z20.s, z8.h, z5.h[2]\n"
- ".inst 0x64764118 // bfdot z24.s, z8.h, z6.h[2]\n"
- ".inst 0x6477411c // bfdot z28.s, z8.h, z7.h[2]\n"
- ".inst 0x64744131 // bfdot z17.s, z9.h, z4.h[2]\n"
- ".inst 0x64754135 // bfdot z21.s, z9.h, z5.h[2]\n"
- ".inst 0x64764139 // bfdot z25.s, z9.h, z6.h[2]\n"
- ".inst 0x6477413d // bfdot z29.s, z9.h, z7.h[2]\n"
- ".inst 0x64744152 // bfdot z18.s, z10.h, z4.h[2]\n"
- ".inst 0x64754156 // bfdot z22.s, z10.h, z5.h[2]\n"
- ".inst 0x6476415a // bfdot z26.s, z10.h, z6.h[2]\n"
- ".inst 0x6477415e // bfdot z30.s, z10.h, z7.h[2]\n"
- ".inst 0x64744173 // bfdot z19.s, z11.h, z4.h[2]\n"
- ".inst 0x64754177 // bfdot z23.s, z11.h, z5.h[2]\n"
- ".inst 0x6476417b // bfdot z27.s, z11.h, z6.h[2]\n"
- ".inst 0x6477417f // bfdot z31.s, z11.h, z7.h[2]\n"
- "b 7f\n"
- "13:\n"
- "cbz %[odds], 7f\n"
- "mov z14.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z8.h, #0\n"
- "ld1h z13.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z15.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z12.h, z13.h, z14.h\n"
- "zip2 z13.h, z13.h, z14.h\n"
- "zip1 z14.h, z15.h, z8.h\n"
- "zip2 z15.h, z15.h, z8.h\n"
- "17:\n"
- ".inst 0x646c4190 // bfdot z16.s, z12.h, z4.h[1]\n"
- ".inst 0x646d4194 // bfdot z20.s, z12.h, z5.h[1]\n"
- ".inst 0x646e4198 // bfdot z24.s, z12.h, z6.h[1]\n"
- ".inst 0x646f419c // bfdot z28.s, z12.h, z7.h[1]\n"
- ".inst 0x646c41b1 // bfdot z17.s, z13.h, z4.h[1]\n"
- ".inst 0x646d41b5 // bfdot z21.s, z13.h, z5.h[1]\n"
- ".inst 0x646e41b9 // bfdot z25.s, z13.h, z6.h[1]\n"
- ".inst 0x646f41bd // bfdot z29.s, z13.h, z7.h[1]\n"
- ".inst 0x646c41d2 // bfdot z18.s, z14.h, z4.h[1]\n"
- ".inst 0x646d41d6 // bfdot z22.s, z14.h, z5.h[1]\n"
- ".inst 0x646e41da // bfdot z26.s, z14.h, z6.h[1]\n"
- ".inst 0x646f41de // bfdot z30.s, z14.h, z7.h[1]\n"
- ".inst 0x646c41f3 // bfdot z19.s, z15.h, z4.h[1]\n"
- ".inst 0x646d41f7 // bfdot z23.s, z15.h, z5.h[1]\n"
- ".inst 0x646e41fb // bfdot z27.s, z15.h, z6.h[1]\n"
- ".inst 0x646f41ff // bfdot z31.s, z15.h, z7.h[1]\n"
- "b 7f\n"
- "12:\n"
- "cbz %[odds], 7f\n"
- "mov z10.h, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z12.h, #0\n"
- "ld1h z9.h, p4/z, [%[b_ptr0]]\n"
- "ld1h z11.h, p5/z, [%[b_ptr0], #1, MUL VL]\n"
- "zip1 z8.h, z9.h, z10.h\n"
- "zip2 z9.h, z9.h, z10.h\n"
- "zip1 z10.h, z11.h, z12.h\n"
- "zip2 z11.h, z11.h, z12.h\n"
- "18:\n"
- ".inst 0x64644110 // bfdot z16.s, z8.h, z4.h[0]\n"
- ".inst 0x64654114 // bfdot z20.s, z8.h, z5.h[0]\n"
- ".inst 0x64664118 // bfdot z24.s, z8.h, z6.h[0]\n"
- ".inst 0x6467411c // bfdot z28.s, z8.h, z7.h[0]\n"
- ".inst 0x64644131 // bfdot z17.s, z9.h, z4.h[0]\n"
- ".inst 0x64654135 // bfdot z21.s, z9.h, z5.h[0]\n"
- ".inst 0x64664139 // bfdot z25.s, z9.h, z6.h[0]\n"
- ".inst 0x6467413d // bfdot z29.s, z9.h, z7.h[0]\n"
- ".inst 0x64644152 // bfdot z18.s, z10.h, z4.h[0]\n"
- ".inst 0x64654156 // bfdot z22.s, z10.h, z5.h[0]\n"
- ".inst 0x6466415a // bfdot z26.s, z10.h, z6.h[0]\n"
- ".inst 0x6467415e // bfdot z30.s, z10.h, z7.h[0]\n"
- ".inst 0x64644173 // bfdot z19.s, z11.h, z4.h[0]\n"
- ".inst 0x64654177 // bfdot z23.s, z11.h, z5.h[0]\n"
- ".inst 0x6466417b // bfdot z27.s, z11.h, z6.h[0]\n"
- ".inst 0x6467417f // bfdot z31.s, z11.h, z7.h[0]\n"
- "7:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "fmax z24.s, p7/m, z24.s, z14.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.s, p7/m, z25.s, z14.s\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.s, p7/m, z26.s, z14.s\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "fmin z24.s, p7/m, z24.s, z15.s\n"
- "fmin z25.s, p7/m, z25.s, z15.s\n"
- "fmax z27.s, p7/m, z27.s, z14.s\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.s, p7/m, z26.s, z15.s\n"
- "fmax z28.s, p7/m, z28.s, z14.s\n"
- "fmax z29.s, p7/m, z29.s, z14.s\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "fmin z27.s, p7/m, z27.s, z15.s\n"
- "fmax z30.s, p7/m, z30.s, z14.s\n"
- "fmin z28.s, p7/m, z28.s, z15.s\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "fmin z29.s, p7/m, z29.s, z15.s\n"
- "fmax z31.s, p7/m, z31.s, z14.s\n"
- "fmin z30.s, p7/m, z30.s, z15.s\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "fmin z31.s, p7/m, z31.s, z15.s\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- "st1w z28.s, p0, [c_ptr3]\n"
- "st1w z29.s, p1, [c_ptr3, #1, MUL VL]\n"
- "st1w z30.s, p2, [c_ptr3, #2, MUL VL]\n"
- "st1w z31.s, p3, [c_ptr3, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4.hpp
deleted file mode 100644
index 665e8656d2..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void sve_native_fp16_mla_4VLx4(const __fp16 *, int, const __fp16 *, int ldb, __fp16 *, int, int, int, int, const __fp16 *, Activation, bool);
-
-class native_fp16_mla_4VLx4
-{
-public:
- typedef __fp16 operand_type;
- typedef __fp16 result_type;
-
- typedef void (*kern_type)(const __fp16 *, int, const __fp16 *, int ldb, __fp16 *, int, int, int, int, const __fp16 *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return get_vector_length<__fp16>() * 4;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 1;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return true;
- }
-
- static constexpr bool supports_activation()
- {
- return true;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=sve_native_fp16_mla_4VLx4;
-
- native_fp16_mla_4VLx4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4/generic.cpp
deleted file mode 100644
index dd33c785cf..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp16_mla_4VLx4/generic.cpp
+++ /dev/null
@@ -1,3814 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_SVE
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void sve_native_fp16_mla_4VLx4(const __fp16 *A, int lda, const __fp16 *B, int ldb, __fp16 *C, int ldc, int M, int N, int K, const __fp16 *bias, Activation act, bool append) {
- const long loops_count = ((K + 8) / 16) - 1;
- K -= loops_count * 16;
- const long regs_count = (K / 8) - 1;
- K -= (regs_count + 1) * 8;
- const long leftovers = K;
- __fp16 nullbias[512];
- if (!append && !bias) {
- memset(nullbias, 0, (4 * get_vector_length<__fp16>() * sizeof(__fp16)));
- }
- __fp16 minval = - static_cast<__fp16>(std::numeric_limits<float>::infinity());
- __fp16 maxval = static_cast<__fp16>(std::numeric_limits<float>::infinity());
- const __fp16 * const minptr = &minval;
- const __fp16 * const maxptr = &maxval;
-
- switch(act.type)
- {
- default:
- case Activation::Type::None:
- break;
- case Activation::Type::BoundedReLU:
- maxval = static_cast<__fp16>(act.param1);
- /* fall through */
- case Activation::Type::ReLU:
- minval = 0.0f;
- break;
- }
-
- int rows_to_compute;
-
- for (int y=0; y<M; y+=rows_to_compute) {
- const __fp16 * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(__fp16);
-
- __fp16 *c_ptr0 = C + (y * ldc);
-
- rows_to_compute = M-y;
- if (rows_to_compute > 4) {
- if (rows_to_compute % 4) {
- rows_to_compute = 4 - 1;
- } else {
- rows_to_compute = 4;
- }
- }
-
- for (int x0=0; x0<N; x0+=(4 * get_vector_length<__fp16>())) {
- const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<__fp16>()));
- long loops = loops_count;
- long regs = regs_count;
- long temp = 0;
- long blocks = leftovers;
- const __fp16 *a_ptr0 = a_ptr0_base;
- const __fp16 *b_ptr0 = B + x0;
- long ldbb = ldb * sizeof(__fp16);
- const unsigned long ldcb = ldc * sizeof(__fp16);
- const __fp16 *biasptr = bias ? bias+x0 : nullbias;
-
- switch(rows_to_compute) {
- case 1:
- __asm __volatile (
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.h, %[temp], %[width]\n"
- "inch %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1h z16.h, p0/z, [%[biasptr]]\n"
- "whilelt p1.h, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "inch %[temp], all, mul #1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
- "whilelt p2.h, %[temp], %[width]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "inch %[temp], all, mul #1\n"
- "ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.h, %[temp], %[width]\n"
- "ld1h z19.h, p3/z, [%[biasptr], #3, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "subs %[loops], %[loops], #0x1\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "b.ne 2b\n"
- "1:\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[regs], 3f\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "4:\n"
- "ld1rh z14.h, p7/z, [%[minptr]]\n"
- "ld1rh z15.h, p7/z, [%[maxptr]]\n"
- "fmax z16.h, p7/m, z16.h, z14.h\n"
- "fmax z17.h, p7/m, z17.h, z14.h\n"
- "fmax z18.h, p7/m, z18.h, z14.h\n"
- "fmax z19.h, p7/m, z19.h, z14.h\n"
- "fmin z16.h, p7/m, z16.h, z15.h\n"
- "fmin z17.h, p7/m, z17.h, z15.h\n"
- "fmin z18.h, p7/m, z18.h, z15.h\n"
- "fmin z19.h, p7/m, z19.h, z15.h\n"
- "st1h z16.h, p0, [%[c_ptr0]]\n"
- "st1h z17.h, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1h z18.h, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1h z19.h, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.h, %[temp], %[width]\n"
- "inch %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1h z16.h, p0/z, [%[biasptr]]\n"
- "whilelt p1.h, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "inch %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "whilelt p2.h, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "inch %[temp], all, mul #1\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.h, %[temp], %[width]\n"
- "mov z22.d, z18.d\n"
- "ld1h z19.h, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "4:\n"
- "ld1rh z14.h, p7/z, [%[minptr]]\n"
- "ld1rh z15.h, p7/z, [%[maxptr]]\n"
- "fmax z16.h, p7/m, z16.h, z14.h\n"
- "fmax z17.h, p7/m, z17.h, z14.h\n"
- "fmax z18.h, p7/m, z18.h, z14.h\n"
- "fmax z19.h, p7/m, z19.h, z14.h\n"
- "fmin z16.h, p7/m, z16.h, z15.h\n"
- "fmin z17.h, p7/m, z17.h, z15.h\n"
- "fmin z18.h, p7/m, z18.h, z15.h\n"
- "fmin z19.h, p7/m, z19.h, z15.h\n"
- "st1h z16.h, p0, [%[c_ptr0]]\n"
- "fmax z20.h, p7/m, z20.h, z14.h\n"
- "fmax z21.h, p7/m, z21.h, z14.h\n"
- "fmax z22.h, p7/m, z22.h, z14.h\n"
- "st1h z17.h, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.h, p7/m, z23.h, z14.h\n"
- "fmin z20.h, p7/m, z20.h, z15.h\n"
- "fmin z21.h, p7/m, z21.h, z15.h\n"
- "st1h z18.h, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.h, p7/m, z22.h, z15.h\n"
- "fmin z23.h, p7/m, z23.h, z15.h\n"
- "st1h z19.h, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1h z20.h, p0, [c_ptr1]\n"
- "st1h z21.h, p1, [c_ptr1, #1, MUL VL]\n"
- "st1h z22.h, p2, [c_ptr1, #2, MUL VL]\n"
- "st1h z23.h, p3, [c_ptr1, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
- );
- break;
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.h, %[temp], %[width]\n"
- "inch %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1h z16.h, p0/z, [%[biasptr]]\n"
- "whilelt p1.h, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "inch %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "ld1rqh z2.h, p7/z, [a_ptr2]\n"
- "whilelt p2.h, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "mov z25.d, z17.d\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "inch %[temp], all, mul #1\n"
- "ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.h, %[temp], %[width]\n"
- "mov z22.d, z18.d\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z26.d, z18.d\n"
- "ld1h z19.h, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "mov z27.d, z19.d\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z2.h, p7/z, [a_ptr2, #-0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "fmla z24.h, z12.h, z6.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "fmla z25.h, z13.h, z6.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "fmla z26.h, z14.h, z6.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "fmla z27.h, z15.h, z6.h[7]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z2.h, p6/z, [a_ptr2, #0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "fmla z24.h, z12.h, z6.h[7]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "fmla z25.h, z13.h, z6.h[7]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "fmla z26.h, z14.h, z6.h[7]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "fmla z27.h, z15.h, z6.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z6.h, p6/z, [a_ptr2]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "4:\n"
- "ld1rh z14.h, p7/z, [%[minptr]]\n"
- "ld1rh z15.h, p7/z, [%[maxptr]]\n"
- "fmax z16.h, p7/m, z16.h, z14.h\n"
- "fmax z17.h, p7/m, z17.h, z14.h\n"
- "fmax z18.h, p7/m, z18.h, z14.h\n"
- "fmax z19.h, p7/m, z19.h, z14.h\n"
- "fmin z16.h, p7/m, z16.h, z15.h\n"
- "fmin z17.h, p7/m, z17.h, z15.h\n"
- "fmin z18.h, p7/m, z18.h, z15.h\n"
- "fmin z19.h, p7/m, z19.h, z15.h\n"
- "st1h z16.h, p0, [%[c_ptr0]]\n"
- "fmax z20.h, p7/m, z20.h, z14.h\n"
- "fmax z21.h, p7/m, z21.h, z14.h\n"
- "fmax z22.h, p7/m, z22.h, z14.h\n"
- "st1h z17.h, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.h, p7/m, z23.h, z14.h\n"
- "fmin z20.h, p7/m, z20.h, z15.h\n"
- "fmin z21.h, p7/m, z21.h, z15.h\n"
- "st1h z18.h, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.h, p7/m, z22.h, z15.h\n"
- "fmin z23.h, p7/m, z23.h, z15.h\n"
- "fmax z24.h, p7/m, z24.h, z14.h\n"
- "st1h z19.h, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.h, p7/m, z25.h, z14.h\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.h, p7/m, z26.h, z14.h\n"
- "st1h z20.h, p0, [c_ptr1]\n"
- "fmin z24.h, p7/m, z24.h, z15.h\n"
- "fmin z25.h, p7/m, z25.h, z15.h\n"
- "fmax z27.h, p7/m, z27.h, z14.h\n"
- "st1h z21.h, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.h, p7/m, z26.h, z15.h\n"
- "fmin z27.h, p7/m, z27.h, z15.h\n"
- "st1h z22.h, p2, [c_ptr1, #2, MUL VL]\n"
- "st1h z23.h, p3, [c_ptr1, #3, MUL VL]\n"
- "st1h z24.h, p0, [c_ptr2]\n"
- "st1h z25.h, p1, [c_ptr2, #1, MUL VL]\n"
- "st1h z26.h, p2, [c_ptr2, #2, MUL VL]\n"
- "st1h z27.h, p3, [c_ptr2, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "whilelt p6.h, %[temp], %[leftovers]\n"
- "whilelt p0.h, %[temp], %[width]\n"
- "inch %[temp], all, mul #1\n"
- "ptrue p7.h\n"
- "ld1h z16.h, p0/z, [%[biasptr]]\n"
- "whilelt p1.h, %[temp], %[width]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0]]\n"
- "inch %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1h z17.h, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqh z1.h, p7/z, [a_ptr1]\n"
- "mov z28.d, z16.d\n"
- "ld1rqh z2.h, p7/z, [a_ptr2]\n"
- "ld1rqh z3.h, p7/z, [a_ptr3]\n"
- "whilelt p2.h, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "mov z25.d, z17.d\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z29.d, z17.d\n"
- "ld1h z18.h, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "inch %[temp], all, mul #1\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z22.d, z18.d\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z26.d, z18.d\n"
- "whilelt p3.h, %[temp], %[width]\n"
- "mov z30.d, z18.d\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "ld1h z19.h, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "mov z27.d, z19.d\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z31.d, z19.d\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z28.h, z8.h, z3.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z7.h, p7/z, [a_ptr3]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z29.h, z9.h, z3.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z30.h, z10.h, z3.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "fmla z31.h, z11.h, z3.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "fmla z28.h, z12.h, z3.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "fmla z29.h, z13.h, z3.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "fmla z30.h, z14.h, z3.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "fmla z31.h, z15.h, z3.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "fmla z28.h, z8.h, z3.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "fmla z29.h, z9.h, z3.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "fmla z30.h, z10.h, z3.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "fmla z31.h, z11.h, z3.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "fmla z28.h, z12.h, z3.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "fmla z29.h, z13.h, z3.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "fmla z30.h, z14.h, z3.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "fmla z31.h, z15.h, z3.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "fmla z28.h, z8.h, z3.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "fmla z29.h, z9.h, z3.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "fmla z30.h, z10.h, z3.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "fmla z31.h, z11.h, z3.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "fmla z28.h, z12.h, z3.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "fmla z29.h, z13.h, z3.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "fmla z30.h, z14.h, z3.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "fmla z31.h, z15.h, z3.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z28.h, z8.h, z3.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z29.h, z9.h, z3.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z30.h, z10.h, z3.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "fmla z31.h, z11.h, z3.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "fmla z28.h, z12.h, z3.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "fmla z29.h, z13.h, z3.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "fmla z30.h, z14.h, z3.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1rqh z1.h, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "ld1rqh z2.h, p7/z, [a_ptr2, #-0x10]\n"
- "fmla z31.h, z15.h, z3.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z3.h, p7/z, [a_ptr3, #-0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "fmla z28.h, z8.h, z7.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "fmla z29.h, z9.h, z7.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "fmla z30.h, z10.h, z7.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "fmla z31.h, z11.h, z7.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "fmla z28.h, z12.h, z7.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "fmla z29.h, z13.h, z7.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "fmla z30.h, z14.h, z7.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "fmla z31.h, z15.h, z7.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "fmla z28.h, z8.h, z7.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "fmla z29.h, z9.h, z7.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "fmla z30.h, z10.h, z7.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "fmla z31.h, z11.h, z7.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "fmla z28.h, z12.h, z7.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "fmla z29.h, z13.h, z7.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "fmla z30.h, z14.h, z7.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "fmla z31.h, z15.h, z7.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "fmla z28.h, z8.h, z7.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "fmla z29.h, z9.h, z7.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "fmla z30.h, z10.h, z7.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "fmla z31.h, z11.h, z7.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "fmla z28.h, z12.h, z7.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "fmla z29.h, z13.h, z7.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "fmla z30.h, z14.h, z7.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "fmla z31.h, z15.h, z7.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "fmla z28.h, z8.h, z7.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "fmla z29.h, z9.h, z7.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "fmla z30.h, z10.h, z7.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "fmla z31.h, z11.h, z7.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "fmla z24.h, z12.h, z6.h[7]\n"
- "fmla z28.h, z12.h, z7.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "fmla z25.h, z13.h, z6.h[7]\n"
- "fmla z29.h, z13.h, z7.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "fmla z26.h, z14.h, z6.h[7]\n"
- "fmla z30.h, z14.h, z7.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "fmla z27.h, z15.h, z6.h[7]\n"
- "fmla z31.h, z15.h, z7.h[7]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p7/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p7/z, [a_ptr1]\n"
- "fmla z28.h, z8.h, z3.h[0]\n"
- "ld1rqh z6.h, p7/z, [a_ptr2]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z7.h, p7/z, [a_ptr3]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z29.h, z9.h, z3.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "fmla z30.h, z10.h, z3.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "fmla z31.h, z11.h, z3.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "fmla z28.h, z12.h, z3.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "fmla z29.h, z13.h, z3.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "fmla z30.h, z14.h, z3.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "fmla z31.h, z15.h, z3.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "fmla z28.h, z8.h, z3.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "fmla z29.h, z9.h, z3.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "fmla z30.h, z10.h, z3.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "fmla z31.h, z11.h, z3.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "fmla z28.h, z12.h, z3.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "fmla z29.h, z13.h, z3.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "fmla z30.h, z14.h, z3.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "fmla z31.h, z15.h, z3.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "fmla z28.h, z8.h, z3.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "fmla z29.h, z9.h, z3.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "fmla z30.h, z10.h, z3.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "fmla z31.h, z11.h, z3.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "fmla z28.h, z12.h, z3.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "fmla z29.h, z13.h, z3.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "fmla z30.h, z14.h, z3.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "fmla z31.h, z15.h, z3.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z28.h, z8.h, z3.h[6]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z29.h, z9.h, z3.h[6]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z30.h, z10.h, z3.h[6]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "fmla z31.h, z11.h, z3.h[6]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "fmla z28.h, z12.h, z3.h[7]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "fmla z29.h, z13.h, z3.h[7]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "fmla z30.h, z14.h, z3.h[7]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "ld1rqh z0.h, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "ld1rqh z1.h, p6/z, [a_ptr1, #0x10]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "ld1rqh z2.h, p6/z, [a_ptr2, #0x10]\n"
- "fmla z31.h, z15.h, z3.h[7]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "ld1rqh z3.h, p6/z, [a_ptr3, #0x10]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z28.h, z8.h, z7.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "addvl a_ptr3, a_ptr3, #2\n"
- "fmla z29.h, z9.h, z7.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "fmla z30.h, z10.h, z7.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "fmla z31.h, z11.h, z7.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "fmla z28.h, z12.h, z7.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "fmla z29.h, z13.h, z7.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "fmla z30.h, z14.h, z7.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "fmla z31.h, z15.h, z7.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "fmla z28.h, z8.h, z7.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "fmla z29.h, z9.h, z7.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "fmla z30.h, z10.h, z7.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "fmla z31.h, z11.h, z7.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "fmla z28.h, z12.h, z7.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "fmla z29.h, z13.h, z7.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "fmla z30.h, z14.h, z7.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "fmla z31.h, z15.h, z7.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "fmla z28.h, z8.h, z7.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "fmla z29.h, z9.h, z7.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "fmla z30.h, z10.h, z7.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "fmla z31.h, z11.h, z7.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "fmla z28.h, z12.h, z7.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "fmla z29.h, z13.h, z7.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "fmla z30.h, z14.h, z7.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "fmla z31.h, z15.h, z7.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "fmla z28.h, z8.h, z7.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "fmla z29.h, z9.h, z7.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "fmla z30.h, z10.h, z7.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "fmla z31.h, z11.h, z7.h[6]\n"
- "fmla z16.h, z12.h, z4.h[7]\n"
- "fmla z20.h, z12.h, z5.h[7]\n"
- "fmla z24.h, z12.h, z6.h[7]\n"
- "fmla z28.h, z12.h, z7.h[7]\n"
- "fmla z17.h, z13.h, z4.h[7]\n"
- "fmla z21.h, z13.h, z5.h[7]\n"
- "fmla z25.h, z13.h, z6.h[7]\n"
- "fmla z29.h, z13.h, z7.h[7]\n"
- "fmla z18.h, z14.h, z4.h[7]\n"
- "fmla z22.h, z14.h, z5.h[7]\n"
- "fmla z26.h, z14.h, z6.h[7]\n"
- "fmla z30.h, z14.h, z7.h[7]\n"
- "fmla z19.h, z15.h, z4.h[7]\n"
- "fmla z23.h, z15.h, z5.h[7]\n"
- "fmla z27.h, z15.h, z6.h[7]\n"
- "fmla z31.h, z15.h, z7.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "fmla z28.h, z8.h, z3.h[0]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "fmla z29.h, z9.h, z3.h[0]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "fmla z30.h, z10.h, z3.h[0]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "fmla z31.h, z11.h, z3.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "fmla z28.h, z12.h, z3.h[1]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "fmla z29.h, z13.h, z3.h[1]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "fmla z30.h, z14.h, z3.h[1]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "fmla z31.h, z15.h, z3.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "fmla z28.h, z8.h, z3.h[2]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "fmla z29.h, z9.h, z3.h[2]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "fmla z30.h, z10.h, z3.h[2]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "fmla z31.h, z11.h, z3.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "fmla z28.h, z12.h, z3.h[3]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "fmla z29.h, z13.h, z3.h[3]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "fmla z30.h, z14.h, z3.h[3]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "fmla z31.h, z15.h, z3.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "fmla z28.h, z8.h, z3.h[4]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "fmla z29.h, z9.h, z3.h[4]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "fmla z30.h, z10.h, z3.h[4]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "fmla z31.h, z11.h, z3.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "fmla z28.h, z12.h, z3.h[5]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "fmla z29.h, z13.h, z3.h[5]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "fmla z30.h, z14.h, z3.h[5]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "fmla z31.h, z15.h, z3.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z28.h, z8.h, z3.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z29.h, z9.h, z3.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z30.h, z10.h, z3.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "fmla z31.h, z11.h, z3.h[6]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.h, z8.h, z0.h[0]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.h, z8.h, z1.h[0]\n"
- "ld1rqh z4.h, p6/z, [%[a_ptr0]]\n"
- "fmla z24.h, z8.h, z2.h[0]\n"
- "ld1rqh z5.h, p6/z, [a_ptr1]\n"
- "fmla z28.h, z8.h, z3.h[0]\n"
- "ld1rqh z6.h, p6/z, [a_ptr2]\n"
- "fmla z17.h, z9.h, z0.h[0]\n"
- "ld1rqh z7.h, p6/z, [a_ptr3]\n"
- "fmla z21.h, z9.h, z1.h[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.h, z9.h, z2.h[0]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z29.h, z9.h, z3.h[0]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z22.h, z10.h, z1.h[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z26.h, z10.h, z2.h[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "fmla z30.h, z10.h, z3.h[0]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[0]\n"
- "addvl a_ptr3, a_ptr3, #1\n"
- "fmla z23.h, z11.h, z1.h[0]\n"
- "fmla z27.h, z11.h, z2.h[0]\n"
- "fmla z31.h, z11.h, z3.h[0]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[1]\n"
- "fmla z24.h, z12.h, z2.h[1]\n"
- "fmla z28.h, z12.h, z3.h[1]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[1]\n"
- "fmla z21.h, z13.h, z1.h[1]\n"
- "fmla z25.h, z13.h, z2.h[1]\n"
- "fmla z29.h, z13.h, z3.h[1]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[1]\n"
- "fmla z22.h, z14.h, z1.h[1]\n"
- "fmla z26.h, z14.h, z2.h[1]\n"
- "fmla z30.h, z14.h, z3.h[1]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[1]\n"
- "fmla z23.h, z15.h, z1.h[1]\n"
- "fmla z27.h, z15.h, z2.h[1]\n"
- "fmla z31.h, z15.h, z3.h[1]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[2]\n"
- "fmla z24.h, z8.h, z2.h[2]\n"
- "fmla z28.h, z8.h, z3.h[2]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[2]\n"
- "fmla z21.h, z9.h, z1.h[2]\n"
- "fmla z25.h, z9.h, z2.h[2]\n"
- "fmla z29.h, z9.h, z3.h[2]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[2]\n"
- "fmla z22.h, z10.h, z1.h[2]\n"
- "fmla z26.h, z10.h, z2.h[2]\n"
- "fmla z30.h, z10.h, z3.h[2]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[2]\n"
- "fmla z23.h, z11.h, z1.h[2]\n"
- "fmla z27.h, z11.h, z2.h[2]\n"
- "fmla z31.h, z11.h, z3.h[2]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[3]\n"
- "fmla z24.h, z12.h, z2.h[3]\n"
- "fmla z28.h, z12.h, z3.h[3]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[3]\n"
- "fmla z21.h, z13.h, z1.h[3]\n"
- "fmla z25.h, z13.h, z2.h[3]\n"
- "fmla z29.h, z13.h, z3.h[3]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[3]\n"
- "fmla z22.h, z14.h, z1.h[3]\n"
- "fmla z26.h, z14.h, z2.h[3]\n"
- "fmla z30.h, z14.h, z3.h[3]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[3]\n"
- "fmla z23.h, z15.h, z1.h[3]\n"
- "fmla z27.h, z15.h, z2.h[3]\n"
- "fmla z31.h, z15.h, z3.h[3]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[4]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z8.h, z1.h[4]\n"
- "fmla z24.h, z8.h, z2.h[4]\n"
- "fmla z28.h, z8.h, z3.h[4]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z9.h, z0.h[4]\n"
- "fmla z21.h, z9.h, z1.h[4]\n"
- "fmla z25.h, z9.h, z2.h[4]\n"
- "fmla z29.h, z9.h, z3.h[4]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z10.h, z0.h[4]\n"
- "fmla z22.h, z10.h, z1.h[4]\n"
- "fmla z26.h, z10.h, z2.h[4]\n"
- "fmla z30.h, z10.h, z3.h[4]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z11.h, z0.h[4]\n"
- "fmla z23.h, z11.h, z1.h[4]\n"
- "fmla z27.h, z11.h, z2.h[4]\n"
- "fmla z31.h, z11.h, z3.h[4]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z0.h[5]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.h, z12.h, z1.h[5]\n"
- "fmla z24.h, z12.h, z2.h[5]\n"
- "fmla z28.h, z12.h, z3.h[5]\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "fmla z17.h, z13.h, z0.h[5]\n"
- "fmla z21.h, z13.h, z1.h[5]\n"
- "fmla z25.h, z13.h, z2.h[5]\n"
- "fmla z29.h, z13.h, z3.h[5]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.h, z14.h, z0.h[5]\n"
- "fmla z22.h, z14.h, z1.h[5]\n"
- "fmla z26.h, z14.h, z2.h[5]\n"
- "fmla z30.h, z14.h, z3.h[5]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.h, z15.h, z0.h[5]\n"
- "fmla z23.h, z15.h, z1.h[5]\n"
- "fmla z27.h, z15.h, z2.h[5]\n"
- "fmla z31.h, z15.h, z3.h[5]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z0.h[6]\n"
- "fmla z20.h, z8.h, z1.h[6]\n"
- "fmla z24.h, z8.h, z2.h[6]\n"
- "fmla z28.h, z8.h, z3.h[6]\n"
- "fmla z17.h, z9.h, z0.h[6]\n"
- "fmla z21.h, z9.h, z1.h[6]\n"
- "fmla z25.h, z9.h, z2.h[6]\n"
- "fmla z29.h, z9.h, z3.h[6]\n"
- "fmla z18.h, z10.h, z0.h[6]\n"
- "fmla z22.h, z10.h, z1.h[6]\n"
- "fmla z26.h, z10.h, z2.h[6]\n"
- "fmla z30.h, z10.h, z3.h[6]\n"
- "fmla z19.h, z11.h, z0.h[6]\n"
- "fmla z23.h, z11.h, z1.h[6]\n"
- "fmla z27.h, z11.h, z2.h[6]\n"
- "fmla z31.h, z11.h, z3.h[6]\n"
- "fmla z16.h, z12.h, z0.h[7]\n"
- "fmla z20.h, z12.h, z1.h[7]\n"
- "fmla z24.h, z12.h, z2.h[7]\n"
- "fmla z28.h, z12.h, z3.h[7]\n"
- "fmla z17.h, z13.h, z0.h[7]\n"
- "fmla z21.h, z13.h, z1.h[7]\n"
- "fmla z25.h, z13.h, z2.h[7]\n"
- "fmla z29.h, z13.h, z3.h[7]\n"
- "fmla z18.h, z14.h, z0.h[7]\n"
- "fmla z22.h, z14.h, z1.h[7]\n"
- "fmla z26.h, z14.h, z2.h[7]\n"
- "fmla z30.h, z14.h, z3.h[7]\n"
- "fmla z19.h, z15.h, z0.h[7]\n"
- "fmla z23.h, z15.h, z1.h[7]\n"
- "fmla z27.h, z15.h, z2.h[7]\n"
- "fmla z31.h, z15.h, z3.h[7]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[0]\n"
- "fmla z20.h, z8.h, z5.h[0]\n"
- "fmla z24.h, z8.h, z6.h[0]\n"
- "fmla z28.h, z8.h, z7.h[0]\n"
- "fmla z17.h, z9.h, z4.h[0]\n"
- "fmla z21.h, z9.h, z5.h[0]\n"
- "fmla z25.h, z9.h, z6.h[0]\n"
- "fmla z29.h, z9.h, z7.h[0]\n"
- "fmla z18.h, z10.h, z4.h[0]\n"
- "fmla z22.h, z10.h, z5.h[0]\n"
- "fmla z26.h, z10.h, z6.h[0]\n"
- "fmla z30.h, z10.h, z7.h[0]\n"
- "fmla z19.h, z11.h, z4.h[0]\n"
- "fmla z23.h, z11.h, z5.h[0]\n"
- "fmla z27.h, z11.h, z6.h[0]\n"
- "fmla z31.h, z11.h, z7.h[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[1]\n"
- "fmla z20.h, z12.h, z5.h[1]\n"
- "fmla z24.h, z12.h, z6.h[1]\n"
- "fmla z28.h, z12.h, z7.h[1]\n"
- "fmla z17.h, z13.h, z4.h[1]\n"
- "fmla z21.h, z13.h, z5.h[1]\n"
- "fmla z25.h, z13.h, z6.h[1]\n"
- "fmla z29.h, z13.h, z7.h[1]\n"
- "fmla z18.h, z14.h, z4.h[1]\n"
- "fmla z22.h, z14.h, z5.h[1]\n"
- "fmla z26.h, z14.h, z6.h[1]\n"
- "fmla z30.h, z14.h, z7.h[1]\n"
- "fmla z19.h, z15.h, z4.h[1]\n"
- "fmla z23.h, z15.h, z5.h[1]\n"
- "fmla z27.h, z15.h, z6.h[1]\n"
- "fmla z31.h, z15.h, z7.h[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[2]\n"
- "fmla z20.h, z8.h, z5.h[2]\n"
- "fmla z24.h, z8.h, z6.h[2]\n"
- "fmla z28.h, z8.h, z7.h[2]\n"
- "fmla z17.h, z9.h, z4.h[2]\n"
- "fmla z21.h, z9.h, z5.h[2]\n"
- "fmla z25.h, z9.h, z6.h[2]\n"
- "fmla z29.h, z9.h, z7.h[2]\n"
- "fmla z18.h, z10.h, z4.h[2]\n"
- "fmla z22.h, z10.h, z5.h[2]\n"
- "fmla z26.h, z10.h, z6.h[2]\n"
- "fmla z30.h, z10.h, z7.h[2]\n"
- "fmla z19.h, z11.h, z4.h[2]\n"
- "fmla z23.h, z11.h, z5.h[2]\n"
- "fmla z27.h, z11.h, z6.h[2]\n"
- "fmla z31.h, z11.h, z7.h[2]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[3]\n"
- "fmla z20.h, z12.h, z5.h[3]\n"
- "fmla z24.h, z12.h, z6.h[3]\n"
- "fmla z28.h, z12.h, z7.h[3]\n"
- "fmla z17.h, z13.h, z4.h[3]\n"
- "fmla z21.h, z13.h, z5.h[3]\n"
- "fmla z25.h, z13.h, z6.h[3]\n"
- "fmla z29.h, z13.h, z7.h[3]\n"
- "fmla z18.h, z14.h, z4.h[3]\n"
- "fmla z22.h, z14.h, z5.h[3]\n"
- "fmla z26.h, z14.h, z6.h[3]\n"
- "fmla z30.h, z14.h, z7.h[3]\n"
- "fmla z19.h, z15.h, z4.h[3]\n"
- "fmla z23.h, z15.h, z5.h[3]\n"
- "fmla z27.h, z15.h, z6.h[3]\n"
- "fmla z31.h, z15.h, z7.h[3]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[4]\n"
- "fmla z20.h, z8.h, z5.h[4]\n"
- "fmla z24.h, z8.h, z6.h[4]\n"
- "fmla z28.h, z8.h, z7.h[4]\n"
- "fmla z17.h, z9.h, z4.h[4]\n"
- "fmla z21.h, z9.h, z5.h[4]\n"
- "fmla z25.h, z9.h, z6.h[4]\n"
- "fmla z29.h, z9.h, z7.h[4]\n"
- "fmla z18.h, z10.h, z4.h[4]\n"
- "fmla z22.h, z10.h, z5.h[4]\n"
- "fmla z26.h, z10.h, z6.h[4]\n"
- "fmla z30.h, z10.h, z7.h[4]\n"
- "fmla z19.h, z11.h, z4.h[4]\n"
- "fmla z23.h, z11.h, z5.h[4]\n"
- "fmla z27.h, z11.h, z6.h[4]\n"
- "fmla z31.h, z11.h, z7.h[4]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1h z12.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z13.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z14.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z15.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z12.h, z4.h[5]\n"
- "fmla z20.h, z12.h, z5.h[5]\n"
- "fmla z24.h, z12.h, z6.h[5]\n"
- "fmla z28.h, z12.h, z7.h[5]\n"
- "fmla z17.h, z13.h, z4.h[5]\n"
- "fmla z21.h, z13.h, z5.h[5]\n"
- "fmla z25.h, z13.h, z6.h[5]\n"
- "fmla z29.h, z13.h, z7.h[5]\n"
- "fmla z18.h, z14.h, z4.h[5]\n"
- "fmla z22.h, z14.h, z5.h[5]\n"
- "fmla z26.h, z14.h, z6.h[5]\n"
- "fmla z30.h, z14.h, z7.h[5]\n"
- "fmla z19.h, z15.h, z4.h[5]\n"
- "fmla z23.h, z15.h, z5.h[5]\n"
- "fmla z27.h, z15.h, z6.h[5]\n"
- "fmla z31.h, z15.h, z7.h[5]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1h z8.h, p0/z, [%[b_ptr0]]\n"
- "ld1h z9.h, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1h z10.h, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1h z11.h, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.h, z8.h, z4.h[6]\n"
- "fmla z20.h, z8.h, z5.h[6]\n"
- "fmla z24.h, z8.h, z6.h[6]\n"
- "fmla z28.h, z8.h, z7.h[6]\n"
- "fmla z17.h, z9.h, z4.h[6]\n"
- "fmla z21.h, z9.h, z5.h[6]\n"
- "fmla z25.h, z9.h, z6.h[6]\n"
- "fmla z29.h, z9.h, z7.h[6]\n"
- "fmla z18.h, z10.h, z4.h[6]\n"
- "fmla z22.h, z10.h, z5.h[6]\n"
- "fmla z26.h, z10.h, z6.h[6]\n"
- "fmla z30.h, z10.h, z7.h[6]\n"
- "fmla z19.h, z11.h, z4.h[6]\n"
- "fmla z23.h, z11.h, z5.h[6]\n"
- "fmla z27.h, z11.h, z6.h[6]\n"
- "fmla z31.h, z11.h, z7.h[6]\n"
- "4:\n"
- "ld1rh z14.h, p7/z, [%[minptr]]\n"
- "ld1rh z15.h, p7/z, [%[maxptr]]\n"
- "fmax z16.h, p7/m, z16.h, z14.h\n"
- "fmax z17.h, p7/m, z17.h, z14.h\n"
- "fmax z18.h, p7/m, z18.h, z14.h\n"
- "fmax z19.h, p7/m, z19.h, z14.h\n"
- "fmin z16.h, p7/m, z16.h, z15.h\n"
- "fmin z17.h, p7/m, z17.h, z15.h\n"
- "fmin z18.h, p7/m, z18.h, z15.h\n"
- "fmin z19.h, p7/m, z19.h, z15.h\n"
- "st1h z16.h, p0, [%[c_ptr0]]\n"
- "fmax z20.h, p7/m, z20.h, z14.h\n"
- "fmax z21.h, p7/m, z21.h, z14.h\n"
- "fmax z22.h, p7/m, z22.h, z14.h\n"
- "st1h z17.h, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.h, p7/m, z23.h, z14.h\n"
- "fmin z20.h, p7/m, z20.h, z15.h\n"
- "fmin z21.h, p7/m, z21.h, z15.h\n"
- "st1h z18.h, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.h, p7/m, z22.h, z15.h\n"
- "fmin z23.h, p7/m, z23.h, z15.h\n"
- "fmax z24.h, p7/m, z24.h, z14.h\n"
- "st1h z19.h, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.h, p7/m, z25.h, z14.h\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.h, p7/m, z26.h, z14.h\n"
- "st1h z20.h, p0, [c_ptr1]\n"
- "fmin z24.h, p7/m, z24.h, z15.h\n"
- "fmin z25.h, p7/m, z25.h, z15.h\n"
- "fmax z27.h, p7/m, z27.h, z14.h\n"
- "st1h z21.h, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.h, p7/m, z26.h, z15.h\n"
- "fmax z28.h, p7/m, z28.h, z14.h\n"
- "fmax z29.h, p7/m, z29.h, z14.h\n"
- "st1h z22.h, p2, [c_ptr1, #2, MUL VL]\n"
- "fmin z27.h, p7/m, z27.h, z15.h\n"
- "fmax z30.h, p7/m, z30.h, z14.h\n"
- "fmin z28.h, p7/m, z28.h, z15.h\n"
- "st1h z23.h, p3, [c_ptr1, #3, MUL VL]\n"
- "fmin z29.h, p7/m, z29.h, z15.h\n"
- "fmax z31.h, p7/m, z31.h, z14.h\n"
- "fmin z30.h, p7/m, z30.h, z15.h\n"
- "st1h z24.h, p0, [c_ptr2]\n"
- "fmin z31.h, p7/m, z31.h, z15.h\n"
- "st1h z25.h, p1, [c_ptr2, #1, MUL VL]\n"
- "st1h z26.h, p2, [c_ptr2, #2, MUL VL]\n"
- "st1h z27.h, p3, [c_ptr2, #3, MUL VL]\n"
- "st1h z28.h, p0, [c_ptr3]\n"
- "st1h z29.h, p1, [c_ptr3, #1, MUL VL]\n"
- "st1h z30.h, p2, [c_ptr3, #2, MUL VL]\n"
- "st1h z31.h, p3, [c_ptr3, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4.hpp
deleted file mode 100644
index 0abde56af1..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void sve_native_fp32_mla_4VLx4(const float *, int, const float *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
-class native_fp32_mla_4VLx4
-{
-public:
- typedef float operand_type;
- typedef float result_type;
-
- typedef void (*kern_type)(const float *, int, const float *, int ldb, float *, int, int, int, int, const float *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return get_vector_length<float>() * 4;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 1;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return true;
- }
-
- static constexpr bool supports_activation()
- {
- return true;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=sve_native_fp32_mla_4VLx4;
-
- native_fp32_mla_4VLx4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp
deleted file mode 100644
index b05906e199..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_fp32_mla_4VLx4/generic.cpp
+++ /dev/null
@@ -1,2070 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_SVE
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void sve_native_fp32_mla_4VLx4(const float *A, int lda, const float *B, int ldb, float *C, int ldc, int M, int N, int K, const float *bias, Activation act, bool append) {
- const long loops_count = ((K + 4) / 8) - 1;
- K -= loops_count * 8;
- const long regs_count = (K / 4) - 1;
- K -= (regs_count + 1) * 4;
- const long leftovers = K;
- float nullbias[256];
- if (!append && !bias) {
- memset(nullbias, 0, (4 * get_vector_length<float>() * sizeof(float)));
- }
- float minval = - static_cast<float>(std::numeric_limits<float>::infinity());
- float maxval = static_cast<float>(std::numeric_limits<float>::infinity());
- const float * const minptr = &minval;
- const float * const maxptr = &maxval;
-
- switch(act.type)
- {
- default:
- case Activation::Type::None:
- break;
- case Activation::Type::BoundedReLU:
- maxval = static_cast<float>(act.param1);
- /* fall through */
- case Activation::Type::ReLU:
- minval = 0.0f;
- break;
- }
-
- int rows_to_compute;
-
- for (int y=0; y<M; y+=rows_to_compute) {
- const float * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(float);
-
- float *c_ptr0 = C + (y * ldc);
-
- rows_to_compute = M-y;
- if (rows_to_compute > 4) {
- if (rows_to_compute % 4) {
- rows_to_compute = 4 - 1;
- } else {
- rows_to_compute = 4;
- }
- }
-
- for (int x0=0; x0<N; x0+=(4 * get_vector_length<float>())) {
- const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<float>()));
- long loops = loops_count;
- long regs = regs_count;
- long temp = 0;
- long blocks = leftovers;
- const float *a_ptr0 = a_ptr0_base;
- const float *b_ptr0 = B + x0;
- long ldbb = ldb * sizeof(float);
- const unsigned long ldcb = ldc * sizeof(float);
- const float *biasptr = bias ? bias+x0 : nullbias;
-
- switch(rows_to_compute) {
- case 1:
- __asm __volatile (
- "whilelt p6.s, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.s\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "subs %[loops], %[loops], #0x1\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[regs], 3f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "4:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "whilelt p6.s, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.s\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "mov z22.d, z18.d\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z1.s, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "4:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
- );
- break;
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "whilelt p6.s, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.s\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z25.d, z17.d\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "mov z22.d, z18.d\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z26.d, z18.d\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "mov z27.d, z19.d\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1rqw z1.s, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z2.s, p7/z, [a_ptr2, #-0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "fmla z24.s, z12.s, z6.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "fmla z25.s, z13.s, z6.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "fmla z26.s, z14.s, z6.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "fmla z27.s, z15.s, z6.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "fmla z24.s, z12.s, z6.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "fmla z25.s, z13.s, z6.s[3]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "fmla z26.s, z14.s, z6.s[3]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "fmla z27.s, z15.s, z6.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z6.s, p6/z, [a_ptr2]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "4:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "fmax z24.s, p7/m, z24.s, z14.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.s, p7/m, z25.s, z14.s\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.s, p7/m, z26.s, z14.s\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "fmin z24.s, p7/m, z24.s, z15.s\n"
- "fmin z25.s, p7/m, z25.s, z15.s\n"
- "fmax z27.s, p7/m, z27.s, z14.s\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.s, p7/m, z26.s, z15.s\n"
- "fmin z27.s, p7/m, z27.s, z15.s\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "whilelt p6.s, %[temp], %[leftovers]\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.s\n"
- "ld1w z16.s, p0/z, [%[biasptr]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "mov z20.d, z16.d\n"
- "ld1w z17.s, p1/z, [%[biasptr], #1, MUL VL]\n"
- "mov z24.d, z16.d\n"
- "ld1rqw z1.s, p7/z, [a_ptr1]\n"
- "mov z28.d, z16.d\n"
- "ld1rqw z2.s, p7/z, [a_ptr2]\n"
- "ld1rqw z3.s, p7/z, [a_ptr3]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "mov z21.d, z17.d\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "mov z25.d, z17.d\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z29.d, z17.d\n"
- "ld1w z18.s, p2/z, [%[biasptr], #2, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "incw %[temp], all, mul #1\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "mov z22.d, z18.d\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "mov z26.d, z18.d\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "mov z30.d, z18.d\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "ld1w z19.s, p3/z, [%[biasptr], #3, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z23.d, z19.d\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "mov z27.d, z19.d\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "mov z31.d, z19.d\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z7.s, p7/z, [a_ptr3]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z29.s, z9.s, z3.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "fmla z30.s, z10.s, z3.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "fmla z31.s, z11.s, z3.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z28.s, z12.s, z3.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "fmla z29.s, z13.s, z3.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "fmla z30.s, z14.s, z3.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "fmla z31.s, z15.s, z3.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z28.s, z8.s, z3.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z29.s, z9.s, z3.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z30.s, z10.s, z3.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z3.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "fmla z28.s, z12.s, z3.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "fmla z29.s, z13.s, z3.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "fmla z30.s, z14.s, z3.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p7/z, [%[a_ptr0], #-0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1rqw z1.s, p7/z, [a_ptr1, #-0x10]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "ld1rqw z2.s, p7/z, [a_ptr2, #-0x10]\n"
- "fmla z31.s, z15.s, z3.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z3.s, p7/z, [a_ptr3, #-0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "fmla z28.s, z8.s, z7.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "fmla z29.s, z9.s, z7.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "fmla z30.s, z10.s, z7.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "fmla z31.s, z11.s, z7.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "fmla z28.s, z12.s, z7.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "fmla z29.s, z13.s, z7.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "fmla z30.s, z14.s, z7.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "fmla z31.s, z15.s, z7.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "fmla z28.s, z8.s, z7.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "fmla z29.s, z9.s, z7.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "fmla z30.s, z10.s, z7.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "fmla z31.s, z11.s, z7.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "fmla z24.s, z12.s, z6.s[3]\n"
- "fmla z28.s, z12.s, z7.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "fmla z25.s, z13.s, z6.s[3]\n"
- "fmla z29.s, z13.s, z7.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "fmla z26.s, z14.s, z6.s[3]\n"
- "fmla z30.s, z14.s, z7.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "fmla z27.s, z15.s, z6.s[3]\n"
- "fmla z31.s, z15.s, z7.s[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "cbz %[regs], 3f\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p7/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p7/z, [a_ptr1]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z6.s, p7/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z7.s, p7/z, [a_ptr3]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z29.s, z9.s, z3.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "fmla z30.s, z10.s, z3.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "fmla z31.s, z11.s, z3.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z28.s, z12.s, z3.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "fmla z29.s, z13.s, z3.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "fmla z30.s, z14.s, z3.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "fmla z31.s, z15.s, z3.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z28.s, z8.s, z3.s[2]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z29.s, z9.s, z3.s[2]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z30.s, z10.s, z3.s[2]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z3.s[2]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "fmla z28.s, z12.s, z3.s[3]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "fmla z29.s, z13.s, z3.s[3]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "fmla z30.s, z14.s, z3.s[3]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "ld1rqw z0.s, p6/z, [%[a_ptr0], #0x10]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "ld1rqw z1.s, p6/z, [a_ptr1, #0x10]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "ld1rqw z2.s, p6/z, [a_ptr2, #0x10]\n"
- "fmla z31.s, z15.s, z3.s[3]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "ld1rqw z3.s, p6/z, [a_ptr3, #0x10]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "fmla z28.s, z8.s, z7.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "addvl a_ptr3, a_ptr3, #2\n"
- "fmla z29.s, z9.s, z7.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "fmla z30.s, z10.s, z7.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "fmla z31.s, z11.s, z7.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "fmla z28.s, z12.s, z7.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "fmla z29.s, z13.s, z7.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "fmla z30.s, z14.s, z7.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "fmla z31.s, z15.s, z7.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "fmla z28.s, z8.s, z7.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "fmla z29.s, z9.s, z7.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "fmla z30.s, z10.s, z7.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "fmla z31.s, z11.s, z7.s[2]\n"
- "fmla z16.s, z12.s, z4.s[3]\n"
- "fmla z20.s, z12.s, z5.s[3]\n"
- "fmla z24.s, z12.s, z6.s[3]\n"
- "fmla z28.s, z12.s, z7.s[3]\n"
- "fmla z17.s, z13.s, z4.s[3]\n"
- "fmla z21.s, z13.s, z5.s[3]\n"
- "fmla z25.s, z13.s, z6.s[3]\n"
- "fmla z29.s, z13.s, z7.s[3]\n"
- "fmla z18.s, z14.s, z4.s[3]\n"
- "fmla z22.s, z14.s, z5.s[3]\n"
- "fmla z26.s, z14.s, z6.s[3]\n"
- "fmla z30.s, z14.s, z7.s[3]\n"
- "fmla z19.s, z15.s, z4.s[3]\n"
- "fmla z23.s, z15.s, z5.s[3]\n"
- "fmla z27.s, z15.s, z6.s[3]\n"
- "fmla z31.s, z15.s, z7.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "fmla z29.s, z9.s, z3.s[0]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "fmla z30.s, z10.s, z3.s[0]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "fmla z31.s, z11.s, z3.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z28.s, z12.s, z3.s[1]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "fmla z29.s, z13.s, z3.s[1]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "fmla z30.s, z14.s, z3.s[1]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "fmla z31.s, z15.s, z3.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z29.s, z9.s, z3.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z30.s, z10.s, z3.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z3.s[2]\n"
- "b 4f\n"
- "3:\n"
- "fmla z16.s, z8.s, z0.s[0]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z20.s, z8.s, z1.s[0]\n"
- "ld1rqw z4.s, p6/z, [%[a_ptr0]]\n"
- "fmla z24.s, z8.s, z2.s[0]\n"
- "ld1rqw z5.s, p6/z, [a_ptr1]\n"
- "fmla z28.s, z8.s, z3.s[0]\n"
- "ld1rqw z6.s, p6/z, [a_ptr2]\n"
- "fmla z17.s, z9.s, z0.s[0]\n"
- "ld1rqw z7.s, p6/z, [a_ptr3]\n"
- "fmla z21.s, z9.s, z1.s[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z25.s, z9.s, z2.s[0]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "fmla z29.s, z9.s, z3.s[0]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z10.s, z0.s[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "fmla z22.s, z10.s, z1.s[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "fmla z26.s, z10.s, z2.s[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "fmla z30.s, z10.s, z3.s[0]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z11.s, z0.s[0]\n"
- "addvl a_ptr3, a_ptr3, #1\n"
- "fmla z23.s, z11.s, z1.s[0]\n"
- "fmla z27.s, z11.s, z2.s[0]\n"
- "fmla z31.s, z11.s, z3.s[0]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z0.s[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "fmla z20.s, z12.s, z1.s[1]\n"
- "fmla z24.s, z12.s, z2.s[1]\n"
- "fmla z28.s, z12.s, z3.s[1]\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "fmla z17.s, z13.s, z0.s[1]\n"
- "fmla z21.s, z13.s, z1.s[1]\n"
- "fmla z25.s, z13.s, z2.s[1]\n"
- "fmla z29.s, z13.s, z3.s[1]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "fmla z18.s, z14.s, z0.s[1]\n"
- "fmla z22.s, z14.s, z1.s[1]\n"
- "fmla z26.s, z14.s, z2.s[1]\n"
- "fmla z30.s, z14.s, z3.s[1]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "fmla z19.s, z15.s, z0.s[1]\n"
- "fmla z23.s, z15.s, z1.s[1]\n"
- "fmla z27.s, z15.s, z2.s[1]\n"
- "fmla z31.s, z15.s, z3.s[1]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z0.s[2]\n"
- "fmla z20.s, z8.s, z1.s[2]\n"
- "fmla z24.s, z8.s, z2.s[2]\n"
- "fmla z28.s, z8.s, z3.s[2]\n"
- "fmla z17.s, z9.s, z0.s[2]\n"
- "fmla z21.s, z9.s, z1.s[2]\n"
- "fmla z25.s, z9.s, z2.s[2]\n"
- "fmla z29.s, z9.s, z3.s[2]\n"
- "fmla z18.s, z10.s, z0.s[2]\n"
- "fmla z22.s, z10.s, z1.s[2]\n"
- "fmla z26.s, z10.s, z2.s[2]\n"
- "fmla z30.s, z10.s, z3.s[2]\n"
- "fmla z19.s, z11.s, z0.s[2]\n"
- "fmla z23.s, z11.s, z1.s[2]\n"
- "fmla z27.s, z11.s, z2.s[2]\n"
- "fmla z31.s, z11.s, z3.s[2]\n"
- "fmla z16.s, z12.s, z0.s[3]\n"
- "fmla z20.s, z12.s, z1.s[3]\n"
- "fmla z24.s, z12.s, z2.s[3]\n"
- "fmla z28.s, z12.s, z3.s[3]\n"
- "fmla z17.s, z13.s, z0.s[3]\n"
- "fmla z21.s, z13.s, z1.s[3]\n"
- "fmla z25.s, z13.s, z2.s[3]\n"
- "fmla z29.s, z13.s, z3.s[3]\n"
- "fmla z18.s, z14.s, z0.s[3]\n"
- "fmla z22.s, z14.s, z1.s[3]\n"
- "fmla z26.s, z14.s, z2.s[3]\n"
- "fmla z30.s, z14.s, z3.s[3]\n"
- "fmla z19.s, z15.s, z0.s[3]\n"
- "fmla z23.s, z15.s, z1.s[3]\n"
- "fmla z27.s, z15.s, z2.s[3]\n"
- "fmla z31.s, z15.s, z3.s[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[0]\n"
- "fmla z20.s, z8.s, z5.s[0]\n"
- "fmla z24.s, z8.s, z6.s[0]\n"
- "fmla z28.s, z8.s, z7.s[0]\n"
- "fmla z17.s, z9.s, z4.s[0]\n"
- "fmla z21.s, z9.s, z5.s[0]\n"
- "fmla z25.s, z9.s, z6.s[0]\n"
- "fmla z29.s, z9.s, z7.s[0]\n"
- "fmla z18.s, z10.s, z4.s[0]\n"
- "fmla z22.s, z10.s, z5.s[0]\n"
- "fmla z26.s, z10.s, z6.s[0]\n"
- "fmla z30.s, z10.s, z7.s[0]\n"
- "fmla z19.s, z11.s, z4.s[0]\n"
- "fmla z23.s, z11.s, z5.s[0]\n"
- "fmla z27.s, z11.s, z6.s[0]\n"
- "fmla z31.s, z11.s, z7.s[0]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "ld1w z12.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z13.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z14.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z15.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z12.s, z4.s[1]\n"
- "fmla z20.s, z12.s, z5.s[1]\n"
- "fmla z24.s, z12.s, z6.s[1]\n"
- "fmla z28.s, z12.s, z7.s[1]\n"
- "fmla z17.s, z13.s, z4.s[1]\n"
- "fmla z21.s, z13.s, z5.s[1]\n"
- "fmla z25.s, z13.s, z6.s[1]\n"
- "fmla z29.s, z13.s, z7.s[1]\n"
- "fmla z18.s, z14.s, z4.s[1]\n"
- "fmla z22.s, z14.s, z5.s[1]\n"
- "fmla z26.s, z14.s, z6.s[1]\n"
- "fmla z30.s, z14.s, z7.s[1]\n"
- "fmla z19.s, z15.s, z4.s[1]\n"
- "fmla z23.s, z15.s, z5.s[1]\n"
- "fmla z27.s, z15.s, z6.s[1]\n"
- "fmla z31.s, z15.s, z7.s[1]\n"
- "b.eq 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1w z8.s, p0/z, [%[b_ptr0]]\n"
- "ld1w z9.s, p1/z, [%[b_ptr0], #1, MUL VL]\n"
- "ld1w z10.s, p2/z, [%[b_ptr0], #2, MUL VL]\n"
- "ld1w z11.s, p3/z, [%[b_ptr0], #3, MUL VL]\n"
- "fmla z16.s, z8.s, z4.s[2]\n"
- "fmla z20.s, z8.s, z5.s[2]\n"
- "fmla z24.s, z8.s, z6.s[2]\n"
- "fmla z28.s, z8.s, z7.s[2]\n"
- "fmla z17.s, z9.s, z4.s[2]\n"
- "fmla z21.s, z9.s, z5.s[2]\n"
- "fmla z25.s, z9.s, z6.s[2]\n"
- "fmla z29.s, z9.s, z7.s[2]\n"
- "fmla z18.s, z10.s, z4.s[2]\n"
- "fmla z22.s, z10.s, z5.s[2]\n"
- "fmla z26.s, z10.s, z6.s[2]\n"
- "fmla z30.s, z10.s, z7.s[2]\n"
- "fmla z19.s, z11.s, z4.s[2]\n"
- "fmla z23.s, z11.s, z5.s[2]\n"
- "fmla z27.s, z11.s, z6.s[2]\n"
- "fmla z31.s, z11.s, z7.s[2]\n"
- "4:\n"
- "ld1rw z14.s, p7/z, [%[minptr]]\n"
- "ld1rw z15.s, p7/z, [%[maxptr]]\n"
- "fmax z16.s, p7/m, z16.s, z14.s\n"
- "fmax z17.s, p7/m, z17.s, z14.s\n"
- "fmax z18.s, p7/m, z18.s, z14.s\n"
- "fmax z19.s, p7/m, z19.s, z14.s\n"
- "fmin z16.s, p7/m, z16.s, z15.s\n"
- "fmin z17.s, p7/m, z17.s, z15.s\n"
- "fmin z18.s, p7/m, z18.s, z15.s\n"
- "fmin z19.s, p7/m, z19.s, z15.s\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "fmax z20.s, p7/m, z20.s, z14.s\n"
- "fmax z21.s, p7/m, z21.s, z14.s\n"
- "fmax z22.s, p7/m, z22.s, z14.s\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "fmax z23.s, p7/m, z23.s, z14.s\n"
- "fmin z20.s, p7/m, z20.s, z15.s\n"
- "fmin z21.s, p7/m, z21.s, z15.s\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "fmin z22.s, p7/m, z22.s, z15.s\n"
- "fmin z23.s, p7/m, z23.s, z15.s\n"
- "fmax z24.s, p7/m, z24.s, z14.s\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "fmax z25.s, p7/m, z25.s, z14.s\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "fmax z26.s, p7/m, z26.s, z14.s\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "fmin z24.s, p7/m, z24.s, z15.s\n"
- "fmin z25.s, p7/m, z25.s, z15.s\n"
- "fmax z27.s, p7/m, z27.s, z14.s\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "fmin z26.s, p7/m, z26.s, z15.s\n"
- "fmax z28.s, p7/m, z28.s, z14.s\n"
- "fmax z29.s, p7/m, z29.s, z14.s\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "fmin z27.s, p7/m, z27.s, z15.s\n"
- "fmax z30.s, p7/m, z30.s, z14.s\n"
- "fmin z28.s, p7/m, z28.s, z15.s\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "fmin z29.s, p7/m, z29.s, z15.s\n"
- "fmax z31.s, p7/m, z31.s, z14.s\n"
- "fmin z30.s, p7/m, z30.s, z15.s\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "fmin z31.s, p7/m, z31.s, z15.s\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- "st1w z28.s, p0, [c_ptr3]\n"
- "st1w z29.s, p1, [c_ptr3, #1, MUL VL]\n"
- "st1w z30.s, p2, [c_ptr3, #2, MUL VL]\n"
- "st1w z31.s, p3, [c_ptr3, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [biasptr] "r" (biasptr), [minptr] "r" (minptr), [maxptr] "r" (maxptr), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4.hpp
deleted file mode 100644
index 40a69b54ff..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-#include <cstdint>
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void sve_native_s8s32_dot_4VLx4(const int8_t *, int, const int8_t *, int ldb, int32_t *, int, int, int, int, const int32_t *, Activation, bool);
-
-class native_s8s32_dot_4VLx4
-{
-public:
- typedef int8_t operand_type;
- typedef int32_t result_type;
-
- typedef void (*kern_type)(const int8_t *, int, const int8_t *, int ldb, int32_t *, int, int, int, int, const int32_t *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return get_vector_length<int32_t>() * 4;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 4;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return false;
- }
-
- static constexpr bool supports_activation()
- {
- return false;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=sve_native_s8s32_dot_4VLx4;
-
- native_s8s32_dot_4VLx4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4/generic.cpp
deleted file mode 100644
index 7c5d4dc280..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_s8s32_dot_4VLx4/generic.cpp
+++ /dev/null
@@ -1,4494 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_SVE
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-#include <cstdint>
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void sve_native_s8s32_dot_4VLx4(const int8_t *A, int lda, const int8_t *B, int ldb, int32_t *C, int ldc, int M, int N, int K, const int32_t *, Activation , bool append) {
- const long loops_count = ((K + 16) / 32) - 1;
- K -= loops_count * 32;
- const long regs_count = (K / 16) - 1;
- K -= (regs_count + 1) * 16;
- const long leftovers = K;
- const long blocks_count = K / 4;
- const long odds_count = K - (blocks_count * 4);
-
- int rows_to_compute;
-
- for (int y=0; y<M; y+=rows_to_compute) {
- const int8_t * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(int8_t);
-
- int32_t *c_ptr0 = C + (y * ldc);
-
- rows_to_compute = M-y;
- if (rows_to_compute > 4) {
- if (rows_to_compute % 4) {
- rows_to_compute = 4 - 1;
- } else {
- rows_to_compute = 4;
- }
- }
-
- for (int x0=0; x0<N; x0+=(4 * get_vector_length<int32_t>())) {
- const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<int32_t>()));
- long loops = loops_count;
- long regs = regs_count;
- long temp = 0;
- long blocks = blocks_count;
- long odds = odds_count;
- const int8_t *a_ptr0 = a_ptr0_base;
- const int8_t *b_ptr0 = B + x0;
- const int8_t *b_ptr1 = b_ptr0 + ldb;
- const int8_t *b_ptr2 = b_ptr1 + ldb;
- const int8_t *b_ptr3 = b_ptr2 + ldb;
- long ldbb = ldb * sizeof(int8_t) * 4;
- const unsigned long ldcb = ldc * sizeof(int32_t);
-
- switch(rows_to_compute) {
- case 1:
- __asm __volatile (
- "mov z16.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z17.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z18.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z19.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "b 7f\n"
- "3:\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z18.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z19.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z20.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z21.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z22.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z23.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "ptrue p7.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "b 7f\n"
- "3:\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
- );
- break;
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov z18.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z19.s, #0\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "mov z20.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z21.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z22.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z23.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z24.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z25.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "mov z26.s, #0\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "mov z27.s, #0\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "incw %[temp], all, mul #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "ptrue p7.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2]\n"
- "incw %[temp], all, mul #1\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "b 7f\n"
- "3:\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z6.b, p6/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov z18.s, #0\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "mov z19.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z20.s, #0\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "mov z21.s, #0\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "mov z22.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z23.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z24.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z25.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z26.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z27.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "mov z28.s, #0\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "mov z29.s, #0\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "incw %[temp], all, mul #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "mov z30.s, #0\n"
- "ptrue p7.b\n"
- "mov z31.s, #0\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2]\n"
- "incw %[temp], all, mul #1\n"
- "ld1rqb z3.b, p7/z, [a_ptr3]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z7.b, p7/z, [a_ptr3]\n"
- "sdot z28.s, z8.b, z3.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "sdot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "sdot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "sdot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z9.b, z3.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z30.s, z10.b, z3.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z31.s, z11.b, z3.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z28.s, z12.b, z3.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z3.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z30.s, z14.b, z3.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2, #-0x10]\n"
- "sdot z31.s, z15.b, z3.b[3]\n"
- "ld1rqb z3.b, p7/z, [a_ptr3, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "sdot z28.s, z8.b, z7.b[0]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z9.b, z7.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z30.s, z10.b, z7.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "sdot z31.s, z11.b, z7.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z28.s, z12.b, z7.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z7.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z30.s, z14.b, z7.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "sdot z31.s, z15.b, z7.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z28.s, z8.b, z7.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z9.b, z7.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z30.s, z10.b, z7.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "sdot z31.s, z11.b, z7.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "sdot z28.s, z12.b, z7.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z7.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "sdot z30.s, z14.b, z7.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "sdot z31.s, z15.b, z7.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "sdot z28.s, z8.b, z3.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "ld1rqb z7.b, p7/z, [a_ptr3]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "sdot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "sdot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "sdot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z9.b, z3.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z30.s, z10.b, z3.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z31.s, z11.b, z3.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z28.s, z12.b, z3.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z3.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z30.s, z14.b, z3.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
- "sdot z31.s, z15.b, z3.b[3]\n"
- "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "sdot z28.s, z8.b, z7.b[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl a_ptr3, a_ptr3, #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z9.b, z7.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z30.s, z10.b, z7.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "sdot z31.s, z11.b, z7.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z28.s, z12.b, z7.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z7.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z30.s, z14.b, z7.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "sdot z31.s, z15.b, z7.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z28.s, z8.b, z7.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z29.s, z9.b, z7.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z30.s, z10.b, z7.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "sdot z31.s, z11.b, z7.b[2]\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "sdot z28.s, z12.b, z7.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "sdot z29.s, z13.b, z7.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "sdot z30.s, z14.b, z7.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "sdot z31.s, z15.b, z7.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "sdot z28.s, z8.b, z3.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "sdot z29.s, z9.b, z3.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "sdot z31.s, z11.b, z3.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z28.s, z12.b, z3.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z30.s, z14.b, z3.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "sdot z31.s, z15.b, z3.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z28.s, z8.b, z3.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "sdot z29.s, z9.b, z3.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z30.s, z10.b, z3.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z31.s, z11.b, z3.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z28.s, z12.b, z3.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "sdot z29.s, z13.b, z3.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z30.s, z14.b, z3.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "sdot z31.s, z15.b, z3.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z28.s, z8.b, z3.b[2]\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "sdot z29.s, z9.b, z3.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z30.s, z10.b, z3.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z31.s, z11.b, z3.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z28.s, z12.b, z3.b[1]\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z30.s, z14.b, z3.b[1]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "sdot z31.s, z15.b, z3.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "sdot z28.s, z8.b, z3.b[0]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "sdot z29.s, z9.b, z3.b[0]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "sdot z31.s, z11.b, z3.b[0]\n"
- "b 7f\n"
- "3:\n"
- "sdot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "sdot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "sdot z28.s, z8.b, z3.b[0]\n"
- "ld1rqb z6.b, p6/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "ld1rqb z7.b, p6/z, [a_ptr3]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "sdot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "sdot z25.s, z9.b, z2.b[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "sdot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "sdot z22.s, z10.b, z1.b[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "sdot z26.s, z10.b, z2.b[0]\n"
- "addvl a_ptr3, a_ptr3, #1\n"
- "sdot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "sdot z23.s, z11.b, z1.b[0]\n"
- "sdot z27.s, z11.b, z2.b[0]\n"
- "sdot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "sdot z20.s, z12.b, z1.b[1]\n"
- "sdot z24.s, z12.b, z2.b[1]\n"
- "sdot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "sdot z17.s, z13.b, z0.b[1]\n"
- "sdot z21.s, z13.b, z1.b[1]\n"
- "sdot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "sdot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "sdot z18.s, z14.b, z0.b[1]\n"
- "sdot z22.s, z14.b, z1.b[1]\n"
- "sdot z26.s, z14.b, z2.b[1]\n"
- "sdot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "sdot z19.s, z15.b, z0.b[1]\n"
- "sdot z23.s, z15.b, z1.b[1]\n"
- "sdot z27.s, z15.b, z2.b[1]\n"
- "sdot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "sdot z16.s, z8.b, z0.b[2]\n"
- "sdot z20.s, z8.b, z1.b[2]\n"
- "sdot z24.s, z8.b, z2.b[2]\n"
- "sdot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "sdot z17.s, z9.b, z0.b[2]\n"
- "sdot z21.s, z9.b, z1.b[2]\n"
- "sdot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z29.s, z9.b, z3.b[2]\n"
- "sdot z18.s, z10.b, z0.b[2]\n"
- "sdot z22.s, z10.b, z1.b[2]\n"
- "sdot z26.s, z10.b, z2.b[2]\n"
- "sdot z30.s, z10.b, z3.b[2]\n"
- "sdot z19.s, z11.b, z0.b[2]\n"
- "sdot z23.s, z11.b, z1.b[2]\n"
- "sdot z27.s, z11.b, z2.b[2]\n"
- "sdot z31.s, z11.b, z3.b[2]\n"
- "sdot z16.s, z12.b, z0.b[3]\n"
- "sdot z20.s, z12.b, z1.b[3]\n"
- "sdot z24.s, z12.b, z2.b[3]\n"
- "sdot z28.s, z12.b, z3.b[3]\n"
- "sdot z17.s, z13.b, z0.b[3]\n"
- "sdot z21.s, z13.b, z1.b[3]\n"
- "sdot z25.s, z13.b, z2.b[3]\n"
- "sdot z29.s, z13.b, z3.b[3]\n"
- "sdot z18.s, z14.b, z0.b[3]\n"
- "sdot z22.s, z14.b, z1.b[3]\n"
- "sdot z26.s, z14.b, z2.b[3]\n"
- "sdot z30.s, z14.b, z3.b[3]\n"
- "sdot z19.s, z15.b, z0.b[3]\n"
- "sdot z23.s, z15.b, z1.b[3]\n"
- "sdot z27.s, z15.b, z2.b[3]\n"
- "sdot z31.s, z15.b, z3.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "sdot z28.s, z8.b, z7.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "sdot z29.s, z9.b, z7.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z30.s, z10.b, z7.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "sdot z31.s, z11.b, z7.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z28.s, z12.b, z7.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "sdot z29.s, z13.b, z7.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z30.s, z14.b, z7.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "sdot z31.s, z15.b, z7.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z28.s, z8.b, z7.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "sdot z29.s, z9.b, z7.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z30.s, z10.b, z7.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "sdot z31.s, z11.b, z7.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[3]\n"
- "sdot z20.s, z12.b, z5.b[3]\n"
- "sdot z24.s, z12.b, z6.b[3]\n"
- "sdot z28.s, z12.b, z7.b[3]\n"
- "sdot z17.s, z13.b, z4.b[3]\n"
- "sdot z21.s, z13.b, z5.b[3]\n"
- "sdot z25.s, z13.b, z6.b[3]\n"
- "sdot z29.s, z13.b, z7.b[3]\n"
- "sdot z18.s, z14.b, z4.b[3]\n"
- "sdot z22.s, z14.b, z5.b[3]\n"
- "sdot z26.s, z14.b, z6.b[3]\n"
- "sdot z30.s, z14.b, z7.b[3]\n"
- "sdot z19.s, z15.b, z4.b[3]\n"
- "sdot z23.s, z15.b, z5.b[3]\n"
- "sdot z27.s, z15.b, z6.b[3]\n"
- "sdot z31.s, z15.b, z7.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[2]\n"
- "sdot z20.s, z8.b, z5.b[2]\n"
- "sdot z24.s, z8.b, z6.b[2]\n"
- "sdot z28.s, z8.b, z7.b[2]\n"
- "sdot z17.s, z9.b, z4.b[2]\n"
- "sdot z21.s, z9.b, z5.b[2]\n"
- "sdot z25.s, z9.b, z6.b[2]\n"
- "sdot z29.s, z9.b, z7.b[2]\n"
- "sdot z18.s, z10.b, z4.b[2]\n"
- "sdot z22.s, z10.b, z5.b[2]\n"
- "sdot z26.s, z10.b, z6.b[2]\n"
- "sdot z30.s, z10.b, z7.b[2]\n"
- "sdot z19.s, z11.b, z4.b[2]\n"
- "sdot z23.s, z11.b, z5.b[2]\n"
- "sdot z27.s, z11.b, z6.b[2]\n"
- "sdot z31.s, z11.b, z7.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "sdot z16.s, z12.b, z4.b[1]\n"
- "sdot z20.s, z12.b, z5.b[1]\n"
- "sdot z24.s, z12.b, z6.b[1]\n"
- "sdot z28.s, z12.b, z7.b[1]\n"
- "sdot z17.s, z13.b, z4.b[1]\n"
- "sdot z21.s, z13.b, z5.b[1]\n"
- "sdot z25.s, z13.b, z6.b[1]\n"
- "sdot z29.s, z13.b, z7.b[1]\n"
- "sdot z18.s, z14.b, z4.b[1]\n"
- "sdot z22.s, z14.b, z5.b[1]\n"
- "sdot z26.s, z14.b, z6.b[1]\n"
- "sdot z30.s, z14.b, z7.b[1]\n"
- "sdot z19.s, z15.b, z4.b[1]\n"
- "sdot z23.s, z15.b, z5.b[1]\n"
- "sdot z27.s, z15.b, z6.b[1]\n"
- "sdot z31.s, z15.b, z7.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "sdot z16.s, z8.b, z4.b[0]\n"
- "sdot z20.s, z8.b, z5.b[0]\n"
- "sdot z24.s, z8.b, z6.b[0]\n"
- "sdot z28.s, z8.b, z7.b[0]\n"
- "sdot z17.s, z9.b, z4.b[0]\n"
- "sdot z21.s, z9.b, z5.b[0]\n"
- "sdot z25.s, z9.b, z6.b[0]\n"
- "sdot z29.s, z9.b, z7.b[0]\n"
- "sdot z18.s, z10.b, z4.b[0]\n"
- "sdot z22.s, z10.b, z5.b[0]\n"
- "sdot z26.s, z10.b, z6.b[0]\n"
- "sdot z30.s, z10.b, z7.b[0]\n"
- "sdot z19.s, z11.b, z4.b[0]\n"
- "sdot z23.s, z11.b, z5.b[0]\n"
- "sdot z27.s, z11.b, z6.b[0]\n"
- "sdot z31.s, z11.b, z7.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- "st1w z28.s, p0, [c_ptr3]\n"
- "st1w z29.s, p1, [c_ptr3, #1, MUL VL]\n"
- "st1w z30.s, p2, [c_ptr3, #2, MUL VL]\n"
- "st1w z31.s, p3, [c_ptr3, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
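
The file deleted above is the signed-int8 "native" SVE kernel: it consumes B directly in row-major form (no pretransposed buffer), walking four consecutive rows of B through b_ptr0..b_ptr3 and interleaving them byte-wise with the two-stage zip1/zip2 sequences so that each 32-bit lane ends up holding the four k-values of one output column, which is the layout sdot expects. Each sdot then folds a 4-way int8 dot product into one 32-bit lane of the accumulators z16..z31, which are finally written to C with the predicated st1w stores at label 7. For orientation only, a minimal scalar sketch of the arithmetic this kernel vectorises (illustrative code, not part of the patch; the function name is hypothetical):

#include <cstddef>
#include <cstdint>

// Scalar reference for what the deleted s8s32 "native dot" kernel computes:
// C[m][n] = sum_k (int32)A[m][k] * (int32)B[k][n], with B read row-major
// (stride ldb) rather than from a pretransposed buffer. The SVE kernel
// vectorises the n loop across z16..z31 and steps the k loop four bytes
// at a time via sdot on the zip-interleaved B rows.
static void reference_s8s32_native_dot(const int8_t *A, size_t lda,
                                       const int8_t *B, size_t ldb,
                                       int32_t *C, size_t ldc,
                                       size_t M, size_t N, size_t K)
{
    for (size_t m = 0; m < M; m++) {
        for (size_t n = 0; n < N; n++) {
            int32_t acc = 0;                       // accumulators start from zero
            for (size_t k = 0; k < K; k++) {
                acc += static_cast<int32_t>(A[m * lda + k]) *
                       static_cast<int32_t>(B[k * ldb + n]);
            }
            C[m * ldc + n] = acc;                  // the kernel overwrites C (append is unsupported)
        }
    }
}
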
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4.hpp
deleted file mode 100644
index 043fa7484a..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4.hpp
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#pragma once
-
-#ifdef __ARM_FEATURE_SVE
-
-#include <cstdint>
-
-
-namespace arm_gemm
-{
-
-// Actual kernel implementations
-void sve_native_u8u32_dot_4VLx4(const uint8_t *, int, const uint8_t *, int ldb, uint32_t *, int, int, int, int, const uint32_t *, Activation, bool);
-
-class native_u8u32_dot_4VLx4
-{
-public:
- typedef uint8_t operand_type;
- typedef uint32_t result_type;
-
- typedef void (*kern_type)(const uint8_t *, int, const uint8_t *, int ldb, uint32_t *, int, int, int, int, const uint32_t *, Activation, bool);
-
- /* Kernel blocking parameters */
- static constexpr unsigned int out_height()
- {
- return 4;
- }
-
- static unsigned int out_width()
- {
- return get_vector_length<uint32_t>() * 4;
- }
-
- static constexpr unsigned int k_unroll()
- {
- return 4;
- }
-
- static constexpr bool supports_append()
- {
- return false;
- }
-
- static constexpr bool supports_bias()
- {
- return false;
- }
-
- static constexpr bool supports_activation()
- {
- return false;
- }
-
-
-
- // Default to the generic kernel
- kern_type kernel=sve_native_u8u32_dot_4VLx4;
-
- native_u8u32_dot_4VLx4(const CPUInfo *)
- {
-
- }
-};
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
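
The interface above only advertises the blocking shape (out_height() of 4 rows, out_width() of four uint32_t vector lengths, k_unroll() of 4) and points at the generic implementation that follows. That implementation, deleted next, splits K up front into unrolled main-loop iterations of 32 bytes, one or two further 16-byte chunks counted by "regs", whole 4-byte "blocks", and 0-3 zero-padded "odds". A small self-check of that decomposition, recomputed from the arithmetic in the deleted generic.cpp (not part of the patch; K = 70 is an arbitrary example):

#include <cassert>

int main()
{
    long K = 70;                              // example depth, chosen so every tail path is non-empty
    const long loops  = ((K + 16) / 32) - 1;  // unrolled main-loop iterations, 32 bytes of K each
    K -= loops * 32;
    const long regs   = (K / 16) - 1;         // further 16-byte chunks; the kernel consumes (regs + 1) of them
    K -= (regs + 1) * 16;
    const long blocks = K / 4;                // whole 4-byte dot-product groups in the leftover
    const long odds   = K - blocks * 4;       // 0-3 trailing bytes, zero-padded by the asm tail
    assert(70 == loops * 32 + (regs + 1) * 16 + blocks * 4 + odds);
    return 0;
}
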
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4/generic.cpp
deleted file mode 100644
index bbc1092e4e..0000000000
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_native_u8u32_dot_4VLx4/generic.cpp
+++ /dev/null
@@ -1,4494 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifdef __ARM_FEATURE_SVE
-
-#include <algorithm>
-
-#include "arm_gemm.hpp"
-#include <cstdint>
-#include "../../asmlib.hpp"
-#include "../../utils.hpp"
-
-namespace arm_gemm {
-
-void sve_native_u8u32_dot_4VLx4(const uint8_t *A, int lda, const uint8_t *B, int ldb, uint32_t *C, int ldc, int M, int N, int K, const uint32_t *, Activation , bool append) {
- const long loops_count = ((K + 16) / 32) - 1;
- K -= loops_count * 32;
- const long regs_count = (K / 16) - 1;
- K -= (regs_count + 1) * 16;
- const long leftovers = K;
- const long blocks_count = K / 4;
- const long odds_count = K - (blocks_count * 4);
-
- int rows_to_compute;
-
- for (int y=0; y<M; y+=rows_to_compute) {
- const uint8_t * const a_ptr0_base = A + (y * lda);
- const unsigned long ldab = lda * sizeof(uint8_t);
-
- uint32_t *c_ptr0 = C + (y * ldc);
-
- rows_to_compute = M-y;
- if (rows_to_compute > 4) {
- if (rows_to_compute % 4) {
- rows_to_compute = 4 - 1;
- } else {
- rows_to_compute = 4;
- }
- }
-
- for (int x0=0; x0<N; x0+=(4 * get_vector_length<uint32_t>())) {
- const long width = std::min((unsigned long)N-x0, (4 * get_vector_length<uint32_t>()));
- long loops = loops_count;
- long regs = regs_count;
- long temp = 0;
- long blocks = blocks_count;
- long odds = odds_count;
- const uint8_t *a_ptr0 = a_ptr0_base;
- const uint8_t *b_ptr0 = B + x0;
- const uint8_t *b_ptr1 = b_ptr0 + ldb;
- const uint8_t *b_ptr2 = b_ptr1 + ldb;
- const uint8_t *b_ptr3 = b_ptr2 + ldb;
- long ldbb = ldb * sizeof(uint8_t) * 4;
- const unsigned long ldcb = ldc * sizeof(uint32_t);
-
- switch(rows_to_compute) {
- case 1:
- __asm __volatile (
- "mov z16.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z17.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z18.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z19.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "ptrue p7.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "b 7f\n"
- "3:\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "cc", "memory"
- );
- break;
- case 2:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "c_ptr1 .req X1\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z18.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z19.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z20.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z21.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z22.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z23.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "ptrue p7.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "incw %[temp], all, mul #1\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "b 7f\n"
- "3:\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq c_ptr1\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "cc", "memory"
- );
- break;
- case 3:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "c_ptr1 .req X2\n"
- "c_ptr2 .req X3\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov z18.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z19.s, #0\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "mov z20.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z21.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z22.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z23.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z24.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z25.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "mov z26.s, #0\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "mov z27.s, #0\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "incw %[temp], all, mul #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "ptrue p7.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2]\n"
- "incw %[temp], all, mul #1\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "b 7f\n"
- "3:\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "ld1rqb z6.b, p6/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "cc", "memory"
- );
- break;
- default:
- case 4:
- __asm __volatile (
- "a_ptr1 .req X0\n"
- "a_ptr2 .req X1\n"
- "a_ptr3 .req X2\n"
- "c_ptr1 .req X3\n"
- "c_ptr2 .req X4\n"
- "c_ptr3 .req X5\n"
- "mov z16.s, #0\n"
- "add a_ptr1, %[a_ptr0], %[lda]\n"
- "mov z17.s, #0\n"
- "add a_ptr2, a_ptr1, %[lda]\n"
- "mov z18.s, #0\n"
- "add a_ptr3, a_ptr2, %[lda]\n"
- "mov z19.s, #0\n"
- "add c_ptr1, %[c_ptr0], %[ldc]\n"
- "mov z20.s, #0\n"
- "add c_ptr2, c_ptr1, %[ldc]\n"
- "mov z21.s, #0\n"
- "add c_ptr3, c_ptr2, %[ldc]\n"
- "mov z22.s, #0\n"
- "whilelt p6.b, %[temp], %[leftovers]\n"
- "mov z23.s, #0\n"
- "whilelt p0.s, %[temp], %[width]\n"
- "mov z24.s, #0\n"
- "whilelt p4.b, %[temp], %[width]\n"
- "mov z25.s, #0\n"
- "incw %[temp], all, mul #1\n"
- "mov z26.s, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "mov z27.s, #0\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "mov z28.s, #0\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "mov z29.s, #0\n"
- "whilelt p1.s, %[temp], %[width]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "incw %[temp], all, mul #1\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "mov z30.s, #0\n"
- "ptrue p7.b\n"
- "mov z31.s, #0\n"
- "whilelt p2.s, %[temp], %[width]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "ld1rqb z1.b, p7/z, [a_ptr1]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2]\n"
- "incw %[temp], all, mul #1\n"
- "ld1rqb z3.b, p7/z, [a_ptr3]\n"
- "add %[a_ptr0], %[a_ptr0], #0x10\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "whilelt p3.s, %[temp], %[width]\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "add a_ptr1, a_ptr1, #0x10\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "add a_ptr2, a_ptr2, #0x10\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "add a_ptr3, a_ptr3, #0x10\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "cbz %[loops], 1f\n"
- "2:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z7.b, p7/z, [a_ptr3]\n"
- "udot z28.s, z8.b, z3.b[0]\n"
- "subs %[loops], %[loops], #0x1\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[a_ptr0], %[a_ptr0], #0x20\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "add a_ptr1, a_ptr1, #0x20\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "add a_ptr2, a_ptr2, #0x20\n"
- "udot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "add a_ptr3, a_ptr3, #0x20\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "udot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "udot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z9.b, z3.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z30.s, z10.b, z3.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z31.s, z11.b, z3.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z28.s, z12.b, z3.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z3.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z30.s, z14.b, z3.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p7/z, [%[a_ptr0], #-0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p7/z, [a_ptr1, #-0x10]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p7/z, [a_ptr2, #-0x10]\n"
- "udot z31.s, z15.b, z3.b[3]\n"
- "ld1rqb z3.b, p7/z, [a_ptr3, #-0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "udot z28.s, z8.b, z7.b[0]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z9.b, z7.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z30.s, z10.b, z7.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "udot z31.s, z11.b, z7.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z28.s, z12.b, z7.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z7.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z30.s, z14.b, z7.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "udot z31.s, z15.b, z7.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z28.s, z8.b, z7.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z9.b, z7.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z30.s, z10.b, z7.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "udot z31.s, z11.b, z7.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "udot z28.s, z12.b, z7.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z7.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "udot z30.s, z14.b, z7.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "udot z31.s, z15.b, z7.b[3]\n"
- "b.ne 2b\n"
- "1:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "cbz %[regs], 3f\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p7/z, [%[a_ptr0]]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p7/z, [a_ptr1]\n"
- "udot z28.s, z8.b, z3.b[0]\n"
- "ld1rqb z6.b, p7/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "ld1rqb z7.b, p7/z, [a_ptr3]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "udot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "udot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "udot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z9.b, z3.b[2]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z30.s, z10.b, z3.b[2]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z31.s, z11.b, z3.b[2]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z28.s, z12.b, z3.b[3]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z3.b[3]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z30.s, z14.b, z3.b[3]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "ld1rqb z0.b, p6/z, [%[a_ptr0], #0x10]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "ld1rqb z1.b, p6/z, [a_ptr1, #0x10]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "ld1rqb z2.b, p6/z, [a_ptr2, #0x10]\n"
- "udot z31.s, z15.b, z3.b[3]\n"
- "ld1rqb z3.b, p6/z, [a_ptr3, #0x10]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "addvl %[a_ptr0], %[a_ptr0], #2\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "addvl a_ptr1, a_ptr1, #2\n"
- "udot z28.s, z8.b, z7.b[0]\n"
- "addvl a_ptr2, a_ptr2, #2\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "addvl a_ptr3, a_ptr3, #2\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z9.b, z7.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z30.s, z10.b, z7.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "udot z31.s, z11.b, z7.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z28.s, z12.b, z7.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z7.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z30.s, z14.b, z7.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "udot z31.s, z15.b, z7.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z28.s, z8.b, z7.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z29.s, z9.b, z7.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z30.s, z10.b, z7.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "udot z31.s, z11.b, z7.b[2]\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "udot z28.s, z12.b, z7.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "udot z29.s, z13.b, z7.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "udot z30.s, z14.b, z7.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "udot z31.s, z15.b, z7.b[3]\n"
- "cbz %[blocks], 4f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "udot z28.s, z8.b, z3.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "udot z29.s, z9.b, z3.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z30.s, z10.b, z3.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "udot z31.s, z11.b, z3.b[0]\n"
- "b.eq 5f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z28.s, z12.b, z3.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z30.s, z14.b, z3.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "udot z31.s, z15.b, z3.b[1]\n"
- "b.eq 6f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z28.s, z8.b, z3.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "udot z29.s, z9.b, z3.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z30.s, z10.b, z3.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z31.s, z11.b, z3.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 8f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 9f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "9:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 10f\n"
- "8:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "10:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z28.s, z12.b, z3.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "udot z29.s, z13.b, z3.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z30.s, z14.b, z3.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "udot z31.s, z15.b, z3.b[3]\n"
- "b 7f\n"
- "6:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 11f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 12f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "12:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 13f\n"
- "11:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "13:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z28.s, z8.b, z3.b[2]\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "udot z29.s, z9.b, z3.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z30.s, z10.b, z3.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z31.s, z11.b, z3.b[2]\n"
- "b 7f\n"
- "5:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 14f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 15f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "15:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 16f\n"
- "14:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "16:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z28.s, z12.b, z3.b[1]\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z30.s, z14.b, z3.b[1]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "udot z31.s, z15.b, z3.b[1]\n"
- "b 7f\n"
- "4:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 17f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 18f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "18:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 19f\n"
- "17:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "19:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "udot z28.s, z8.b, z3.b[0]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "udot z29.s, z9.b, z3.b[0]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "udot z30.s, z10.b, z3.b[0]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "udot z31.s, z11.b, z3.b[0]\n"
- "b 7f\n"
- "3:\n"
- "udot z16.s, z8.b, z0.b[0]\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z20.s, z8.b, z1.b[0]\n"
- "ld1rqb z4.b, p6/z, [%[a_ptr0]]\n"
- "udot z24.s, z8.b, z2.b[0]\n"
- "ld1rqb z5.b, p6/z, [a_ptr1]\n"
- "udot z28.s, z8.b, z3.b[0]\n"
- "ld1rqb z6.b, p6/z, [a_ptr2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "ld1rqb z7.b, p6/z, [a_ptr3]\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z17.s, z9.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "addvl %[a_ptr0], %[a_ptr0], #1\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "udot z21.s, z9.b, z1.b[0]\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "udot z25.s, z9.b, z2.b[0]\n"
- "addvl a_ptr1, a_ptr1, #1\n"
- "udot z29.s, z9.b, z3.b[0]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z10.b, z0.b[0]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "udot z22.s, z10.b, z1.b[0]\n"
- "addvl a_ptr2, a_ptr2, #1\n"
- "udot z26.s, z10.b, z2.b[0]\n"
- "addvl a_ptr3, a_ptr3, #1\n"
- "udot z30.s, z10.b, z3.b[0]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z11.b, z0.b[0]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "udot z23.s, z11.b, z1.b[0]\n"
- "udot z27.s, z11.b, z2.b[0]\n"
- "udot z31.s, z11.b, z3.b[0]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z12.b, z0.b[1]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "udot z20.s, z12.b, z1.b[1]\n"
- "udot z24.s, z12.b, z2.b[1]\n"
- "udot z28.s, z12.b, z3.b[1]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "udot z17.s, z13.b, z0.b[1]\n"
- "udot z21.s, z13.b, z1.b[1]\n"
- "udot z25.s, z13.b, z2.b[1]\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "udot z29.s, z13.b, z3.b[1]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "udot z18.s, z14.b, z0.b[1]\n"
- "udot z22.s, z14.b, z1.b[1]\n"
- "udot z26.s, z14.b, z2.b[1]\n"
- "udot z30.s, z14.b, z3.b[1]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "udot z19.s, z15.b, z0.b[1]\n"
- "udot z23.s, z15.b, z1.b[1]\n"
- "udot z27.s, z15.b, z2.b[1]\n"
- "udot z31.s, z15.b, z3.b[1]\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "udot z16.s, z8.b, z0.b[2]\n"
- "udot z20.s, z8.b, z1.b[2]\n"
- "udot z24.s, z8.b, z2.b[2]\n"
- "udot z28.s, z8.b, z3.b[2]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "udot z17.s, z9.b, z0.b[2]\n"
- "udot z21.s, z9.b, z1.b[2]\n"
- "udot z25.s, z9.b, z2.b[2]\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z29.s, z9.b, z3.b[2]\n"
- "udot z18.s, z10.b, z0.b[2]\n"
- "udot z22.s, z10.b, z1.b[2]\n"
- "udot z26.s, z10.b, z2.b[2]\n"
- "udot z30.s, z10.b, z3.b[2]\n"
- "udot z19.s, z11.b, z0.b[2]\n"
- "udot z23.s, z11.b, z1.b[2]\n"
- "udot z27.s, z11.b, z2.b[2]\n"
- "udot z31.s, z11.b, z3.b[2]\n"
- "udot z16.s, z12.b, z0.b[3]\n"
- "udot z20.s, z12.b, z1.b[3]\n"
- "udot z24.s, z12.b, z2.b[3]\n"
- "udot z28.s, z12.b, z3.b[3]\n"
- "udot z17.s, z13.b, z0.b[3]\n"
- "udot z21.s, z13.b, z1.b[3]\n"
- "udot z25.s, z13.b, z2.b[3]\n"
- "udot z29.s, z13.b, z3.b[3]\n"
- "udot z18.s, z14.b, z0.b[3]\n"
- "udot z22.s, z14.b, z1.b[3]\n"
- "udot z26.s, z14.b, z2.b[3]\n"
- "udot z30.s, z14.b, z3.b[3]\n"
- "udot z19.s, z15.b, z0.b[3]\n"
- "udot z23.s, z15.b, z1.b[3]\n"
- "udot z27.s, z15.b, z2.b[3]\n"
- "udot z31.s, z15.b, z3.b[3]\n"
- "cbz %[blocks], 20f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "udot z28.s, z8.b, z7.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "udot z29.s, z9.b, z7.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z30.s, z10.b, z7.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "udot z31.s, z11.b, z7.b[0]\n"
- "b.eq 21f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "subs %[blocks], %[blocks], #0x1\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "ld1b z12.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z28.s, z12.b, z7.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "udot z29.s, z13.b, z7.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z30.s, z14.b, z7.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "udot z31.s, z15.b, z7.b[1]\n"
- "b.eq 22f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "add %[b_ptr3], %[b_ptr3], %[ldb]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "ld1b z8.b, p4/z, [%[b_ptr3]]\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z28.s, z8.b, z7.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "udot z29.s, z9.b, z7.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z30.s, z10.b, z7.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "udot z31.s, z11.b, z7.b[2]\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 23f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 24f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "24:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 25f\n"
- "23:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "25:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[3]\n"
- "udot z20.s, z12.b, z5.b[3]\n"
- "udot z24.s, z12.b, z6.b[3]\n"
- "udot z28.s, z12.b, z7.b[3]\n"
- "udot z17.s, z13.b, z4.b[3]\n"
- "udot z21.s, z13.b, z5.b[3]\n"
- "udot z25.s, z13.b, z6.b[3]\n"
- "udot z29.s, z13.b, z7.b[3]\n"
- "udot z18.s, z14.b, z4.b[3]\n"
- "udot z22.s, z14.b, z5.b[3]\n"
- "udot z26.s, z14.b, z6.b[3]\n"
- "udot z30.s, z14.b, z7.b[3]\n"
- "udot z19.s, z15.b, z4.b[3]\n"
- "udot z23.s, z15.b, z5.b[3]\n"
- "udot z27.s, z15.b, z6.b[3]\n"
- "udot z31.s, z15.b, z7.b[3]\n"
- "b 7f\n"
- "22:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 26f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 27f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "27:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 28f\n"
- "26:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "28:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[2]\n"
- "udot z20.s, z8.b, z5.b[2]\n"
- "udot z24.s, z8.b, z6.b[2]\n"
- "udot z28.s, z8.b, z7.b[2]\n"
- "udot z17.s, z9.b, z4.b[2]\n"
- "udot z21.s, z9.b, z5.b[2]\n"
- "udot z25.s, z9.b, z6.b[2]\n"
- "udot z29.s, z9.b, z7.b[2]\n"
- "udot z18.s, z10.b, z4.b[2]\n"
- "udot z22.s, z10.b, z5.b[2]\n"
- "udot z26.s, z10.b, z6.b[2]\n"
- "udot z30.s, z10.b, z7.b[2]\n"
- "udot z19.s, z11.b, z4.b[2]\n"
- "udot z23.s, z11.b, z5.b[2]\n"
- "udot z27.s, z11.b, z6.b[2]\n"
- "udot z31.s, z11.b, z7.b[2]\n"
- "b 7f\n"
- "21:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 29f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 30f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z13.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "30:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z14.b, p4/z, [%[b_ptr1]]\n"
- "b 31f\n"
- "29:\n"
- "mov z13.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z14.b, #0\n"
- "ld1b z12.b, p4/z, [%[b_ptr0]]\n"
- "31:\n"
- "zip2 z15.b, z12.b, z13.b\n"
- "zip1 z13.b, z12.b, z13.b\n"
- "mov z12.b, #0\n"
- "zip2 z8.b, z14.b, z12.b\n"
- "zip1 z14.b, z14.b, z12.b\n"
- "zip1 z12.b, z13.b, z14.b\n"
- "zip2 z13.b, z13.b, z14.b\n"
- "zip1 z14.b, z15.b, z8.b\n"
- "zip2 z15.b, z15.b, z8.b\n"
- "udot z16.s, z12.b, z4.b[1]\n"
- "udot z20.s, z12.b, z5.b[1]\n"
- "udot z24.s, z12.b, z6.b[1]\n"
- "udot z28.s, z12.b, z7.b[1]\n"
- "udot z17.s, z13.b, z4.b[1]\n"
- "udot z21.s, z13.b, z5.b[1]\n"
- "udot z25.s, z13.b, z6.b[1]\n"
- "udot z29.s, z13.b, z7.b[1]\n"
- "udot z18.s, z14.b, z4.b[1]\n"
- "udot z22.s, z14.b, z5.b[1]\n"
- "udot z26.s, z14.b, z6.b[1]\n"
- "udot z30.s, z14.b, z7.b[1]\n"
- "udot z19.s, z15.b, z4.b[1]\n"
- "udot z23.s, z15.b, z5.b[1]\n"
- "udot z27.s, z15.b, z6.b[1]\n"
- "udot z31.s, z15.b, z7.b[1]\n"
- "b 7f\n"
- "20:\n"
- "cbz %[odds], 7f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 32f\n"
- "subs %[odds], %[odds], #0x1\n"
- "b.eq 33f\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr2], %[b_ptr2], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z9.b, p4/z, [%[b_ptr2]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "33:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "add %[b_ptr1], %[b_ptr1], %[ldb]\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "ld1b z10.b, p4/z, [%[b_ptr1]]\n"
- "b 34f\n"
- "32:\n"
- "mov z9.b, #0\n"
- "add %[b_ptr0], %[b_ptr0], %[ldb]\n"
- "mov z10.b, #0\n"
- "ld1b z8.b, p4/z, [%[b_ptr0]]\n"
- "34:\n"
- "zip2 z11.b, z8.b, z9.b\n"
- "zip1 z9.b, z8.b, z9.b\n"
- "mov z8.b, #0\n"
- "zip2 z12.b, z10.b, z8.b\n"
- "zip1 z10.b, z10.b, z8.b\n"
- "zip1 z8.b, z9.b, z10.b\n"
- "zip2 z9.b, z9.b, z10.b\n"
- "zip1 z10.b, z11.b, z12.b\n"
- "zip2 z11.b, z11.b, z12.b\n"
- "udot z16.s, z8.b, z4.b[0]\n"
- "udot z20.s, z8.b, z5.b[0]\n"
- "udot z24.s, z8.b, z6.b[0]\n"
- "udot z28.s, z8.b, z7.b[0]\n"
- "udot z17.s, z9.b, z4.b[0]\n"
- "udot z21.s, z9.b, z5.b[0]\n"
- "udot z25.s, z9.b, z6.b[0]\n"
- "udot z29.s, z9.b, z7.b[0]\n"
- "udot z18.s, z10.b, z4.b[0]\n"
- "udot z22.s, z10.b, z5.b[0]\n"
- "udot z26.s, z10.b, z6.b[0]\n"
- "udot z30.s, z10.b, z7.b[0]\n"
- "udot z19.s, z11.b, z4.b[0]\n"
- "udot z23.s, z11.b, z5.b[0]\n"
- "udot z27.s, z11.b, z6.b[0]\n"
- "udot z31.s, z11.b, z7.b[0]\n"
- "7:\n"
- "st1w z16.s, p0, [%[c_ptr0]]\n"
- "st1w z17.s, p1, [%[c_ptr0], #1, MUL VL]\n"
- "st1w z18.s, p2, [%[c_ptr0], #2, MUL VL]\n"
- "st1w z19.s, p3, [%[c_ptr0], #3, MUL VL]\n"
- "addvl %[c_ptr0], %[c_ptr0], #4\n"
- "st1w z20.s, p0, [c_ptr1]\n"
- "st1w z21.s, p1, [c_ptr1, #1, MUL VL]\n"
- "st1w z22.s, p2, [c_ptr1, #2, MUL VL]\n"
- "st1w z23.s, p3, [c_ptr1, #3, MUL VL]\n"
- "st1w z24.s, p0, [c_ptr2]\n"
- "st1w z25.s, p1, [c_ptr2, #1, MUL VL]\n"
- "st1w z26.s, p2, [c_ptr2, #2, MUL VL]\n"
- "st1w z27.s, p3, [c_ptr2, #3, MUL VL]\n"
- "st1w z28.s, p0, [c_ptr3]\n"
- "st1w z29.s, p1, [c_ptr3, #1, MUL VL]\n"
- "st1w z30.s, p2, [c_ptr3, #2, MUL VL]\n"
- "st1w z31.s, p3, [c_ptr3, #3, MUL VL]\n"
- ".unreq a_ptr1\n"
- ".unreq a_ptr2\n"
- ".unreq a_ptr3\n"
- ".unreq c_ptr1\n"
- ".unreq c_ptr2\n"
- ".unreq c_ptr3\n"
- : [a_ptr0] "+r" (a_ptr0), [b_ptr0] "+r" (b_ptr0), [b_ptr1] "+r" (b_ptr1), [b_ptr2] "+r" (b_ptr2), [b_ptr3] "+r" (b_ptr3), [c_ptr0] "+r" (c_ptr0), [loops] "+r" (loops), [regs] "+r" (regs), [temp] "+r" (temp), [blocks] "+r" (blocks), [odds] "+r" (odds)
- : [width] "r" (width), [append] "r" (static_cast<uint64_t>(append)), [lda] "r" (ldab), [ldc] "r" (ldcb), [leftovers] "r" (leftovers), [ldb] "r" (ldbb)
- : "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31", "x0", "x1", "x2", "x3", "x4", "x5", "cc", "memory"
- );
- break;
- }
-
- }
- }
-}
-
-} // namespace arm_gemm
-
-#endif // __ARM_FEATURE_SVE
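The inner loops removed above interleave four rows of B on the fly with zip1/zip2 and accumulate unsigned 8-bit dot products into 32-bit lanes with SVE udot, with A loaded as replicated quadwords (ld1rqb). As a hedged scalar reference only (names and shapes below are illustrative, not taken from this patch), each "udot zAcc.s, zB.b, zA.b[i]" step behaves like:

    #include <cstddef>
    #include <cstdint>

    // Scalar sketch of one indexed UDOT step: every 32-bit accumulator lane
    // gains the dot product of its four B bytes with one broadcast group of
    // four A bytes (the same group for all lanes here, because the kernel
    // loads A with ld1rqb, i.e. a replicated 128-bit quadword).
    static void udot_lane_reference(uint32_t *acc,           // accumulator lanes (zAcc.s)
                                    const uint8_t *b_bytes,  // 4 interleaved B bytes per lane (zB.b)
                                    const uint8_t a_quad[4], // broadcast A group (zA.b[i])
                                    size_t lanes) {
        for (size_t l = 0; l < lanes; l++) {
            uint32_t sum = 0;
            for (int k = 0; k < 4; k++) {
                sum += static_cast<uint32_t>(b_bytes[4 * l + k]) * static_cast<uint32_t>(a_quad[k]);
            }
            acc[l] += sum;
        }
    }
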
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp
index 6b070d6d71..b555066195 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,7 +57,7 @@ public:
return 1;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
@@ -77,7 +77,7 @@ public:
// Default to the generic kernel
kern_type kernel=sve_smallK_hybrid_fp32_mla_1VLx8;
- smallK_hybrid_fp32_mla_1VLx8(const CPUInfo *ci)
+ smallK_hybrid_fp32_mla_1VLx8(const CPUInfo *)
{
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp
index 9bc0969bf2..eef1e4cc65 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_s8s32_dot_1VLx8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,7 +57,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp
index cc27c13533..70a0b12130 100644
--- a/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_smallK_hybrid_u8u32_dot_1VLx8.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -57,7 +57,7 @@ public:
return 4;
}
- static constexpr bool supports_append()
+ static constexpr bool supports_accumulate()
{
return false;
}
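The three kernel headers above rename the capability query supports_append() to supports_accumulate(); it remains a static constexpr member returning false for these smallK kernels. A minimal, hedged sketch (illustrative only, not code from this patch) of how a caller can branch on the renamed trait at compile time:

    // "Strategy" stands for any kernel class that declares supports_accumulate().
    template <typename Strategy>
    constexpr bool kernel_can_accumulate() {
        // static constexpr in the headers above, so this folds to a constant.
        return Strategy::supports_accumulate();
    }
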
diff --git a/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp b/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
index 5c58c585d7..eec842d09f 100644
--- a/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
+++ b/src/core/NEON/kernels/arm_gemm/quantize_wrapper.hpp
@@ -61,16 +61,9 @@ private:
}
/* Local working space: We need space for the subgemm output (above) and
- * the row sums. If the GEMM is not pretransposed we need to store the
- * column sums here too. */
+ * the row sums. */
size_t local_working_size() const {
- size_t sz = subgemm_output_size() + row_sum_size();
-
- if (_args._pretransposed_hint) {
- return sz;
- }
-
- return sz + col_sum_size();
+ return subgemm_output_size() + row_sum_size();
}
void set_child_arrays() {
@@ -90,15 +83,6 @@ private:
}
}
- void col_sums_runtime(unsigned int threadid) {
- unsigned int first_col = (threadid * _args._Nsize) / _args._maxthreads;
- unsigned int last_col = ((threadid + 1) * _args._Nsize) / _args._maxthreads;
-
- for (unsigned int multi=0; multi<_args._nmulti; multi++) {
- compute_col_sums(_params, (last_col - first_col), _args._Ksize, this->_Bptr + (multi * this->_B_multi_stride) + first_col, this->_ldb, _col_sums + (multi * _args._Nsize) + first_col, _args._Ksize, multi, first_col);
- }
- }
-
void requantize_runtime(unsigned int threadid) {
unsigned int first_row = (threadid * _args._Msize) / _args._maxthreads;
unsigned int last_row = ((threadid+1) * _args._Msize) / _args._maxthreads;
@@ -126,16 +110,12 @@ public:
QuantizeWrapper operator=(const QuantizeWrapper &) = delete;
QuantizeWrapper(const GemmArgs &args, const Requantize32 &qp) : _params(qp), _args(args), _barrier(args._maxthreads) {
- GemmArgs newargs = GemmArgs(args._ci, args._Msize, args._Nsize, args._Ksize, args._nbatches, args._nmulti, args._trA, args._trB, Activation(), args._maxthreads, args._pretransposed_hint, nullptr);
+ GemmArgs newargs = GemmArgs(args._ci, args._Msize, args._Nsize, args._Ksize, args._nbatches, args._nmulti, Activation(), args._maxthreads, nullptr);
_subgemm = gemm<To, Tgemm>(newargs);
if (_subgemm == nullptr) {
return;
}
-
- if (!_subgemm->B_is_pretransposed()) {
- _args._pretransposed_hint = false;
- }
}
void set_arrays(const To *A, const int lda, const int A_batch_stride, const int A_multi_stride,
@@ -160,9 +140,6 @@ public:
void execute(const ndcoord_t &work_range, const ndcoord_t &thread_locator, int threadid) override {
_subgemm->execute(work_range, thread_locator, threadid);
- if (!_args._pretransposed_hint) {
- col_sums_runtime(threadid);
- }
_barrier.arrive_and_wait();
@@ -177,7 +154,7 @@ public:
// ptr
// V
- // | subgemm output | row_sums | col_sums (if not pretransposed | subgemm working space |
+ // | subgemm output | row_sums | subgemm working space |
void set_working_space(void *space) override {
uintptr_t space_int = reinterpret_cast<uintptr_t>(space);
@@ -185,16 +162,13 @@ public:
_subgemm->set_working_space(reinterpret_cast<void *>(space_int + local_working_size()));
_row_sums = reinterpret_cast<int32_t *>(space_int + subgemm_output_size());
- if (!_args._pretransposed_hint) {
- _col_sums = reinterpret_cast<int32_t *>(space_int + subgemm_output_size() + row_sum_size());
- }
set_child_arrays();
}
bool B_is_pretransposed() const override {
        /* Whether B is pretransposed is decided entirely by the subgemm now, so just return its value */
- return _args._pretransposed_hint;
+ return _subgemm->B_is_pretransposed();
}
bool B_pretranspose_required() const override {
@@ -202,18 +176,10 @@ public:
}
size_t get_B_pretransposed_array_size() const override {
- if (_args._pretransposed_hint) {
- return _subgemm->get_B_pretransposed_array_size() + col_sum_size();
- }
-
- return 0;
+ return _subgemm->get_B_pretransposed_array_size() + col_sum_size();
}
void pretranspose_B_array(void *buffer, const To *B, const int ldb, const int B_multi_stride) override {
- if (!_args._pretransposed_hint) {
- return;
- }
-
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
_subgemm->pretranspose_B_array(reinterpret_cast<void *>(buffer_int + col_sum_size()), B, ldb, B_multi_stride);
@@ -223,10 +189,6 @@ public:
}
void set_pretransposed_B_data(void *buffer) override {
- if (!_args._pretransposed_hint) {
- return;
- }
-
uintptr_t buffer_int = reinterpret_cast<uintptr_t>(buffer);
_subgemm->set_pretransposed_B_data(reinterpret_cast<void *>(buffer_int + col_sum_size()));
_col_sums = reinterpret_cast<int32_t *>(buffer);
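
The quantize wrapper changes above boil down to a fixed buffer layout: the column sums now always live at the front of the pretransposed-B buffer, so get_B_pretransposed_array_size() is the subgemm's requirement plus col_sum_size(), while the per-run working space only needs the subgemm output and the row sums. A minimal sketch of that arithmetic with assumed sizes; only the two formulas mirror the simplified wrapper above.

#include <cstddef>
#include <cstdio>

// All sizes here are assumptions for illustration.
struct QuantWorkspaceSketch
{
    size_t subgemm_output_size;    // int32 output of the wrapped GEMM
    size_t row_sum_size;           // per-row sums, computed at run time
    size_t col_sum_size;           // per-column sums, now always precomputed
    size_t subgemm_pretransposed;  // whatever the wrapped GEMM requests

    // Matches local_working_size(): the col-sum branch is gone.
    size_t local_working_size() const
    {
        return subgemm_output_size + row_sum_size;
    }

    // Matches get_B_pretransposed_array_size(): col sums sit at the front of
    // the pretransposed buffer, followed by the subgemm's own B data.
    size_t pretransposed_array_size() const
    {
        return subgemm_pretransposed + col_sum_size;
    }
    size_t col_sums_offset() const { return 0; }
    size_t subgemm_B_offset() const { return col_sum_size; }
};

int main()
{
    QuantWorkspaceSketch s{4096, 64, 128, 8192};
    std::printf("working=%zu pretransposed=%zu B offset=%zu\n",
                s.local_working_size(), s.pretransposed_array_size(), s.subgemm_B_offset());
    return 0;
}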
diff --git a/src/core/NEON/kernels/arm_gemm/quantized.cpp b/src/core/NEON/kernels/arm_gemm/quantized.cpp
index fbf49c8a31..e50dca7f1f 100644
--- a/src/core/NEON/kernels/arm_gemm/quantized.cpp
+++ b/src/core/NEON/kernels/arm_gemm/quantized.cpp
@@ -645,9 +645,9 @@ namespace {
vst1q_s32(row_bias, t0);
break;
+
default:
UNREACHABLE("Impossible.");
-
}
}
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
index 4b838c82a1..ab2a2f3382 100644
--- a/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_fixed.hpp
@@ -45,22 +45,14 @@ class StdTransformsFixed
public:
template<typename TIn>
void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
- const int ymax, const int k0, const int kmax, bool transposed) const {
- if (transposed) {
- Transform<height, block, true>(out, in, stride, y0, ymax, k0, kmax);
- } else {
- Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
- }
+ const int ymax, const int k0, const int kmax) const {
+ Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
}
template<typename TIn>
void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
- const int xmax, const int k0, const int kmax, bool transposed) const {
- if (transposed) {
- Transform<width, block, false>(out, in, stride, x0, xmax, k0, kmax);
- } else {
- Transform<width, block, true>(out, in, stride, x0, xmax, k0, kmax);
- }
+ const int xmax, const int k0, const int kmax) const {
+ Transform<width, block, true>(out, in, stride, x0, xmax, k0, kmax);
}
template<typename TOut>
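
With the transposed flag gone from PrepareA()/PrepareB(), the transpose decision is a compile-time template argument rather than a runtime branch: A is always interleaved untransposed and B is always transposed. A throwaway sketch with a stub Transform that only shares the call shape of the library template, purely to show which instantiation a caller now gets; the stub is not the real implementation.

#include <cstdio>

// Stub with the same call shape as the library's Transform template; it just
// reports the instantiation that would be selected after this patch.
template <unsigned int Height, unsigned int Block, bool Transpose, typename TOut, typename TIn>
void Transform(TOut *out, const TIn *in, int stride, int i0, int imax, int k0, int kmax)
{
    std::printf("Transform<%u,%u,%s> over rows [%d,%d) and k [%d,%d)\n",
                Height, Block, Transpose ? "true" : "false", i0, imax, k0, kmax);
    (void)out; (void)in; (void)stride;
}

int main()
{
    float src[64] = {};
    float dst[64] = {};
    Transform<8, 1, false>(dst, src, 8, 0, 8, 0, 8);  // PrepareA: never transposed now
    Transform<12, 1, true>(dst, src, 8, 0, 8, 0, 8);  // PrepareB: always transposed now
    return 0;
}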
diff --git a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
index 6b64e5e36c..5b45ccd52a 100644
--- a/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
+++ b/src/core/NEON/kernels/arm_gemm/std_transforms_sve.hpp
@@ -44,22 +44,14 @@ class StdTransformsSVE
public:
template<typename TIn>
void PrepareA(TOperand *out, const TIn *in, const int stride, const int y0,
- const int ymax, const int k0, const int kmax, bool transposed) {
- if (transposed) {
- Transform<height, block, true>(out, in, stride, y0, ymax, k0, kmax);
- } else {
- Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
- }
+ const int ymax, const int k0, const int kmax) {
+ Transform<height, block, false>(out, in, stride, y0, ymax, k0, kmax);
}
template<typename TIn>
void PrepareB(TOperand *out, const TIn *in, const int stride, const int x0,
- const int xmax, const int k0, const int kmax, bool transposed) {
- if (transposed) {
- Transform<width_vectors, block, false, true>(out, in, stride, x0, xmax, k0, kmax);
- } else {
- Transform<width_vectors, block, true, true>(out, in, stride, x0, xmax, k0, kmax);
- }
+ const int xmax, const int k0, const int kmax) {
+ Transform<width_vectors, block, true, true>(out, in, stride, x0, xmax, k0, kmax);
}
template<typename TOut>
diff --git a/src/core/NEON/kernels/assembly/Helpers.cpp b/src/core/NEON/kernels/assembly/Helpers.cpp
deleted file mode 100644
index 5990505a59..0000000000
--- a/src/core/NEON/kernels/assembly/Helpers.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2018-2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "src/core/NEON/kernels/assembly/Helpers.h"
-
-namespace arm_compute
-{
-arm_gemm::KernelDescription get_gemm_info(DataType input_type,
- const CPUInfo &ci,
- const unsigned int num_threads,
- const INEGEMMWrapperKernel::Params &p,
- arm_gemm::Activation activation,
- bool pretranspose_hint)
-{
- switch(input_type)
- {
-#ifdef __aarch64__
- case DataType::QASYMM8:
- case DataType::U8:
- {
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, pretranspose_hint);
- return arm_gemm::get_gemm_method<uint8_t, uint32_t>(args);
- }
- case DataType::S8:
- {
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, pretranspose_hint);
- return arm_gemm::get_gemm_method<int8_t, int32_t>(args);
- }
-#endif // __aarch64__
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
- case DataType::F16:
- {
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, pretranspose_hint);
- return arm_gemm::get_gemm_method<__fp16, __fp16>(args);
- }
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
- case DataType::F32:
- {
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, pretranspose_hint);
- return arm_gemm::get_gemm_method<float, float>(args);
- }
- default:
- return arm_gemm::KernelDescription();
- }
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/assembly/Helpers.h b/src/core/NEON/kernels/assembly/Helpers.h
deleted file mode 100644
index 09c0446ada..0000000000
--- a/src/core/NEON/kernels/assembly/Helpers.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (c) 2018-2020 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_ASSEMBLY_HELPERS_H
-#define ARM_COMPUTE_ASSEMBLY_HELPERS_H
-
-#include "arm_compute/core/CPP/CPPTypes.h"
-#include "arm_compute/core/Utils.h"
-
-#include "arm_compute/core/NEON/kernels/assembly/INEGEMMWrapperKernel.h"
-#include "arm_gemm.hpp"
-
-namespace arm_compute
-{
-/** Block sizes to use to break the M, N, K dimension */
-struct BlockSizes
-{
- unsigned int k_block{ 0 }; /**< Block size along the K dimension */
- unsigned int x_block{ 0 }; /**< Block size along the N (x) dimension */
- unsigned int m_round{ 0 }; /**< Block size along the M dimension (Must be a multiple of strategy_out_height) */
- unsigned int strategy_out_height{ 0 }; /**< Number of rows (M) processed by the selected strategy */
-};
-
-/** Extracts the kernel description of the selected kernel by the GEMM backend heuristics
- *
- * @param[in] input_type Data type of the input tensor.
- * @param[in] ci CPU information.
- * @param[in] num_threads Maximum number of threads that might be used for the calculations.
- * @param[in] p M, N, K sizes.
- * @param[in] activation Activation struct
- * @param[in] pretranspose_hint Is B also pretransposed ?
- *
- * @return Kernel description that the assembly heuristics picked for the given configuration
- */
-arm_gemm::KernelDescription get_gemm_info(DataType input_type,
- const CPUInfo &ci,
- const unsigned int num_threads,
- const INEGEMMWrapperKernel::Params &p,
- arm_gemm::Activation activation,
- bool pretranspose_hint);
-
-/** Calculate the recommended block sizes to use based on the CPU cache sizes and the strategy which will be used
- *
- * @param[in] ci CPU information.
- * @param[in] M M dimension.
- * @param[in] N N dimension.
- * @param[in] K K dimension.
- *
- * @return Recommended block sizes to use for the given M, N, K dimensions.
- */
-template <typename strategy>
-BlockSizes calculate_block_sizes(const CPUInfo &ci, unsigned int M, unsigned int N, unsigned int K)
-{
- BlockSizes bs;
-
- using Toi = typename strategy::operand_type;
-
- const unsigned int L1_size = ci.get_L1_cache_size();
- const unsigned int L2_size = ci.get_L2_cache_size();
-
- // Work out blocking parameters
-
- // k_block: Find out how much of the larger array can be loaded into half the cache.
- // This should account for associative caches.
- bs.k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));
-
- // Needs to be (at least a single) multiple of the K unroll level.
- bs.k_block /= strategy::k_unroll();
- bs.k_block = std::max(bs.k_block, 1U) * strategy::k_unroll();
-
- // Now tune to presented problem size; this is how many blocks we need.
- int num_k_blocks = DIV_CEIL(K, bs.k_block);
-
- // So divide the space equally into that many blocks.
- bs.k_block = DIV_CEIL(K, num_k_blocks);
-
- // And round UP to the K unroll level required.
- bs.k_block = ceil_to_multiple(bs.k_block, strategy::k_unroll());
-
- // x_block: Work out how many rows (of length k_block) will fit in the L2
- // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
- bs.x_block = (((L2_size * 9) / 10) - (bs.k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height()))) / (sizeof(Toi) * bs.k_block);
-
- // Needs to be (at least a single) multiple of the kernel output width.
- bs.x_block /= strategy::out_width();
- bs.x_block = std::max(bs.x_block, 1U) * strategy::out_width();
-
- // And tune to the presented problem size.
- int num_x_blocks = DIV_CEIL(N, bs.x_block);
- bs.x_block = DIV_CEIL(N, num_x_blocks);
-
- bs.x_block = ceil_to_multiple(bs.x_block, strategy::out_width());
-
- // Work out the rounded size of M - needed for some buffers.
- bs.m_round = ceil_to_multiple(M, strategy::out_height());
- bs.strategy_out_height = strategy::out_height();
-
- return bs;
-}
-
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_ASSEMBLY_HELPERS_H */
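
For reference, the blocking heuristic documented in the deleted helper is easy to reproduce numerically. A standalone sketch with assumed numbers (32 KiB L1, 512 KiB L2, an fp32 kernel producing 12x8 tiles with k_unroll = 1, and M = N = K = 1000); div_ceil and ceil_to_multiple are local stand-ins for the library's DIV_CEIL and ceil_to_multiple.

#include <algorithm>
#include <cstdio>

static unsigned int div_ceil(unsigned int a, unsigned int b) { return (a + b - 1) / b; }
static unsigned int ceil_to_multiple(unsigned int a, unsigned int m) { return div_ceil(a, m) * m; }

int main()
{
    const unsigned int L1 = 32 * 1024, L2 = 512 * 1024; // assumed cache sizes
    const unsigned int out_width = 12, out_height = 8;  // assumed kernel tile
    const unsigned int k_unroll = 1;
    const unsigned int M = 1000, N = 1000, K = 1000;    // assumed problem size
    const unsigned int elem = sizeof(float);

    // k_block: half the L1 shared between one kernel-width column and one kernel-height
    // row, rounded to the K unroll, then evened out over the number of K blocks needed.
    unsigned int k_block = (L1 / 2) / (elem * std::max(out_width, out_height));
    k_block = std::max(k_block / k_unroll, 1U) * k_unroll;
    k_block = ceil_to_multiple(div_ceil(K, div_ceil(K, k_block)), k_unroll);

    // x_block: 90% of the L2 minus the panels already charged to the L1, in units of
    // k_block rows, rounded to the kernel width, then evened out over the N blocks needed.
    unsigned int x_block = (((L2 * 9) / 10) - (k_block * elem * (out_width + out_height))) / (elem * k_block);
    x_block = std::max(x_block / out_width, 1U) * out_width;
    x_block = ceil_to_multiple(div_ceil(N, div_ceil(N, x_block)), out_width);

    // m_round: M rounded up to the kernel height, used for buffer sizing.
    const unsigned int m_round = ceil_to_multiple(M, out_height);

    std::printf("k_block=%u x_block=%u m_round=%u\n", k_block, x_block, m_round); // 334 252 1000
    return 0;
}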
diff --git a/src/core/NEON/kernels/assembly/arm_gemm.hpp b/src/core/NEON/kernels/assembly/arm_gemm.hpp
index 2df7132500..58bc9a51d1 100644
--- a/src/core/NEON/kernels/assembly/arm_gemm.hpp
+++ b/src/core/NEON/kernels/assembly/arm_gemm.hpp
@@ -104,19 +104,15 @@ public:
unsigned int _Ksize;
unsigned int _nbatches;
unsigned int _nmulti;
- bool _trA;
- bool _trB;
Activation _act;
int _maxthreads;
- bool _pretransposed_hint;
const GemmConfig *_cfg;
GemmArgs(const CPUInfo *ci, const unsigned int M, const unsigned int N,
const unsigned int K, const unsigned int nbatches,
- const unsigned int nmulti, const bool trA, const bool trB,
- Activation act, const int maxthreads,
- const bool pretransposed_hint, const GemmConfig *cfg = nullptr)
- : _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti), _trA(trA), _trB(trB), _act(act), _maxthreads(maxthreads), _pretransposed_hint(pretransposed_hint), _cfg(cfg)
+ const unsigned int nmulti, Activation act, const int maxthreads,
+ const GemmConfig *cfg = nullptr)
+ : _ci(ci), _Msize(M), _Nsize(N), _Ksize(K), _nbatches(nbatches), _nmulti(nmulti), _act(act), _maxthreads(maxthreads), _cfg(cfg)
{
}
};
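
With trA, trB and the pretransposed hint removed, a GemmArgs is now just the problem shape, the activation, the thread count and an optional config. A minimal caller-side sketch, assuming the surrounding ComputeLibrary headers make CPUInfo visible as at the call sites updated below; the sizes are placeholders and the factory call mirrors the one in quantize_wrapper.hpp.

#include "arm_gemm.hpp"

// Placeholder problem sizes; 'ci' and 'act' are supplied by the caller.
void configure_fp32_example(const CPUInfo &ci, arm_gemm::Activation act, int num_threads)
{
    arm_gemm::GemmArgs args(&ci, /*M=*/128, /*N=*/96, /*K=*/64,
                            /*nbatches=*/1, /*nmulti=*/1,
                            act, num_threads);           // _cfg defaults to nullptr
    auto gemm = arm_gemm::gemm<float, float>(args);      // same factory the quantize wrapper uses
    (void)gemm;
}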
diff --git a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
index 8a2506f39a..98590f10c7 100644
--- a/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
+++ b/src/runtime/NEON/functions/NEGEMMAssemblyDispatch.cpp
@@ -457,7 +457,7 @@ void create_arm_gemm(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &arm_gem
const CPUInfo &ci = NEScheduler::get().cpu_info();
unsigned int num_threads = NEScheduler::get().num_threads();
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, gemm_info.pretranpose_B());
+ arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, activation, num_threads);
// Create arm_gemm fallback
auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput>>();
@@ -475,7 +475,7 @@ void create_arm_gemm_quant(std::unique_ptr<NEGEMMAssemblyDispatch::IFallback> &a
const CPUInfo &ci = NEScheduler::get().cpu_info();
unsigned int num_threads = NEScheduler::get().num_threads();
- arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, false, false, activation, num_threads, gemm_info.pretranpose_B());
+ arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.batches, p.multis, activation, num_threads);
// Create arm_gemm fallback
auto fallback = support::cpp14::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::Requantize32>>();