path: root/src
author     Jonathan Deakin <jonathan.deakin@arm.com>    2024-01-24 09:15:38 +0000
committer  Radu Salavat <radu.salavat@arm.com>           2024-04-15 13:52:31 +0000
commit     a668f9f8a4eab405df0fe8dd58e7d9425bcf9640 (patch)
tree       db16e6af9289897557a58755b88d2c337dcb8650 /src
parent     34bdffb288d6367cb6dca652ebed60c450854039 (diff)
download   ComputeLibrary-a668f9f8a4eab405df0fe8dd58e7d9425bcf9640.tar.gz
Add s8f32 kernels and dynamic QuantizationInfo
- Add support for QASYMM8_SIGNED*QASYMM8_SIGNED->F32 in CpuGemmLowpMatrixMultiplyCore
- Add s8f32 kernel using existing s8->s32 kernels with a new DequantizeFloat OutputStage; the structure is similar to Requantize32 but works the opposite way around
- Add SME s8f32 kernels with integrated support for DequantizeFloat
- Add scale to CpuGemmLowpOffsetContributionKernel
- Add virtual dequantize scale to gemm_common, only implemented for gemm_interleaved
- Update year to 2024 in generate_build_files
- Add dynamic flag to QuantizationInfo which signals to operators that it can change after configuration
- Add support for dynamic quantization in NEGEMMLowpMatrixMultiplyCore
- Add dynamic quantization fixture by extending GEMMLowpGenericMatrixMultiplyCoreValidationFixture
- Add GEMMLowpDequantizedMatrixMultiplyValidationFixture
- Store k (number of cols of A) rather than k_offset in the offset contribution kernels so that it can be recomputed when the other offsets change (the expansion below spells this out)

Relates to: ONCPUML-1444, MLINFSW-439

Co-authored-by: Milos Puzovic <Milos.Puzovic@arm.com>
Co-authored-by: David Mansell <David.Mansell@arm.com>
Change-Id: I58a3acf2c09289a303e52eea6b336a696a5bc8da
Signed-off-by: Jonathan Deakin <jonathan.deakin@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/11022
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
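For orientation, the k_offset change can be spelled out with the standard GEMMLowp expansion (the symbols za, zb and K below are introduced here for illustration: the zero point of A, the zero point of B, and the depth of the matrix multiply):

    sum_k (qa[i,k] - za) * (qb[k,j] - zb)
        = sum_k qa[i,k] * qb[k,j]
          - zb * row_sum_A[i]
          - za * col_sum_B[j]
          + K * za * zb

The final K * za * zb term is what the offset contribution kernels previously cached as k_offset. With a dynamic QuantizationInfo the zero points may change after configuration, so the kernels now keep K and rebuild the term from whatever offsets are current at run time.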
Diffstat (limited to 'src')
-rw-r--r--  src/BUILD.bazel | 4
-rw-r--r--  src/CMakeLists.txt | 4
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp | 108
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp | 142
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp | 93
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp | 417
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp | 93
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp | 448
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp | 93
-rw-r--r--  src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp | 513
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantized.cpp | 60
-rw-r--r--  src/core/NEON/kernels/arm_gemm/quantized.hpp | 6
-rw-r--r--  src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp | 324
-rw-r--r--  src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h | 41
-rw-r--r--  src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp | 21
-rw-r--r--  src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h | 26
-rw-r--r--  src/cpu/kernels/assembly/arm_gemm.hpp | 13
-rw-r--r--  src/cpu/kernels/assembly/gemm_common.hpp | 6
-rw-r--r--  src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp | 81
-rw-r--r--  src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h | 9
-rw-r--r--  src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp | 53
21 files changed, 2494 insertions(+), 61 deletions(-)
diff --git a/src/BUILD.bazel b/src/BUILD.bazel
index 11d988338f..e3cac07de1 100644
--- a/src/BUILD.bazel
+++ b/src/BUILD.bazel
@@ -263,6 +263,9 @@ filegroup(
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp",
+ "core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp",
"core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp",
@@ -518,6 +521,7 @@ filegroup(
"core/NEON/kernels/arm_gemm/gemm_int8.cpp",
"core/NEON/kernels/arm_gemm/gemm_qint8.cpp",
"core/NEON/kernels/arm_gemm/gemm_quint8.cpp",
+ "core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp",
"core/NEON/kernels/arm_gemm/gemm_uint16.cpp",
"core/NEON/kernels/arm_gemm/gemm_uint8.cpp",
"core/NEON/kernels/arm_gemm/interleave-8way.cpp",
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index dbd3028859..984db79c18 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -238,6 +238,9 @@ target_sources(
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_1VLx4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_2VLx2VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8q_mopa_4VLx1VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
+ core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_1VLx4VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_2VLx2VL/generic.cpp
core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8s32_mopa_4VLx1VL/generic.cpp
@@ -509,6 +512,7 @@ target_sources(
core/NEON/kernels/arm_gemm/gemm_int8.cpp
core/NEON/kernels/arm_gemm/gemm_qint8.cpp
core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+ core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
core/NEON/kernels/arm_gemm/gemm_uint16.cpp
core/NEON/kernels/arm_gemm/gemm_uint8.cpp
core/NEON/kernels/arm_gemm/interleave-8way.cpp
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
index d8b464584a..ae344f09b5 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_interleaved.hpp
@@ -29,7 +29,6 @@
#include "arm_gemm.hpp"
#include "bfloat.hpp"
#include "convolver.hpp"
-#include "kernel_weight_format.hpp"
#include "kernel_traits.hpp"
#include "kernel_weight_format.hpp"
#include "mergeresults.hpp"
@@ -247,6 +246,84 @@ void kernel_and_merge<true, false, Requantize32>::run(
}
}
+// Run a kernel with integrated merge, dequantizing to FP32
+template<>
+template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+void kernel_and_merge<false, false, DequantizeFloat>::run(
+#ifdef CYCLE_PROFILING
+ profiler &prof,
+#endif
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
+ Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
+ unsigned int n_0, unsigned int n_max, const Tr *bias,
+ const Activation &act, bool accumulate, const DequantizeFloat &dq, const int32_t *col_bias,
+ Tab *acc_buff)
+{
+#ifdef CYCLE_PROFILING
+ auto p=prof.ScopedProfiler(PROFILE_KERNEL, (m_max - m_0) * (n_max - n_0) * kern_k);
+#endif
+
+ const int32_t *offset_col_bias = nullptr;
+ const Tr *offset_bias = nullptr;
+
+ if (col_bias) {
+ offset_col_bias = col_bias + n_0;
+ }
+
+ if (bias) {
+ offset_bias = bias + n_0;
+ }
+
+ strat.kernel(// A and B pointers are just the packed panels.
+ a_ptr, b_panel,
+ // Provide relevant part of output array and row stride.
+ c_ptr ? (c_ptr + m_0 * ldc + n_0) : nullptr, ldc,
+ // M, N, K sizes
+ m_max-m_0, n_max - n_0, kern_k,
+ // Bias, activation, accumulation. Need to offset the bias as needed.
+ offset_col_bias, dq, offset_bias, act, accumulate, acc_buff);
+}
+
+template<>
+template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
+void kernel_and_merge<true, false, DequantizeFloat>::run(
+#ifdef CYCLE_PROFILING
+ profiler &prof,
+#endif
+ strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
+ Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
+ unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *bias,
+ const Activation &act, bool accumulate, const DequantizeFloat &qp, const int32_t *,
+ Tab *)
+{
+ const int bblocks = iceildiv(n_max - n_0, strategy::out_width());
+
+ {
+#ifdef CYCLE_PROFILING
+ auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k));
+#endif
+
+ strat.kernel(a_ptr, b_panel, c_panel, 1, bblocks, kern_k);
+ }
+
+ {
+#ifdef CYCLE_PROFILING
+ auto p=prof.ScopedProfiler(PROFILE_QUANTIZE, ((m_max-m_0) * bblocks * strategy::out_width() * sizeof(Tr)));
+#endif
+ auto out_area = strategy::out_width() * strategy::out_height();
+ for (int i=0; i<bblocks; i++) {
+ const unsigned int n_start = n_0 + (strategy::out_width() * i);
+ const unsigned int n_end = std::min(n_start + strategy::out_width(), n_max);
+
+ dequantize_block_32(qp, (n_end - n_start), (m_max - m_0),
+ c_panel + (i * out_area), strategy::out_width(),
+ c_ptr + m_0 * ldc + n_start, ldc,
+ bias != nullptr ? bias + n_start : nullptr, accumulate, act);
+
+ }
+ }
+}
+
// Integer GEMMs can be used in two contexts - "normal" where the full 32-bit output is required, or in
// "requantizing" context where the output will be requantized.
//
@@ -280,6 +357,12 @@ public:
typedef int32_t type;
};
+template<typename strategy>
+class accumulate_buffer_type<strategy, DequantizeFloat, false> {
+public:
+ typedef int32_t type;
+};
+
template<typename strategy, typename OutputStage>
class accumulate_buffer_type<strategy, OutputStage, true> {
public:
@@ -764,6 +847,9 @@ public:
const bool first_pass = (k0==0);
const bool last_pass = (kmax==_Ktotal);
+ // Bias is passed for the first pass only, except for dequantizefloat nomerge cases where it's the last pass.
+ const bool bias_pass = (std::is_same<OutputStage, DequantizeFloat>::value && !MergeStep) ? last_pass : first_pass;
+
// Figure out how many "K" the kernel will actually process.
unsigned int kern_k = roundup(kmax - k0, strategy::k_unroll());
@@ -822,7 +908,7 @@ public:
// K size, and M/N ranges
kern_k, start_row, end_row, start_x, end_x,
// Only do bias on the first pass
- ((first_pass && this->_bias) ? this->_bias + (multi * this->_bias_multi_stride) : nullptr),
+ ((bias_pass && this->_bias) ? this->_bias + (multi * this->_bias_multi_stride) : nullptr),
// Only do activation on the last pass, and accumulation on any non-first pass.
(last_pass ? _act : Activation()), (!first_pass || _accumulate),
// Pass in quantization parameters for requantizing kernels (others will ignore)
@@ -949,6 +1035,9 @@ public:
const bool first_pass = (current.k0() == 0);
const bool last_pass = (current.kmax() == _Ktotal);
+ // Bias is passed for the first pass only, except for dequantizefloat nomerge cases where it's the last pass.
+ const bool bias_pass = (std::is_same<OutputStage, DequantizeFloat>::value && !MergeStep) ? last_pass : first_pass;
+
// Pointer to appropriate part of result array.
Tr *result_ptr = this->_Cptr + (batch * this->_C_batch_stride) + (current.multi() * this->_C_multi_stride);
@@ -970,7 +1059,7 @@ public:
// K size, and M/N ranges
kern_k, y, ymax, current.x0(), current.xmax(),
// Only do bias on the first pass
- ((first_pass && this->_bias) ? this->_bias + (current.multi() * this->_bias_multi_stride) : nullptr),
+ ((bias_pass && this->_bias) ? this->_bias + (current.multi() * this->_bias_multi_stride) : nullptr),
// Only do activation on the last pass, and accumulation on any non-first pass.
(last_pass ? _act : Activation()), (!first_pass || _accumulate),
// Pass in quantization parameters for requantizing kernels (others will ignore)
@@ -1185,6 +1274,13 @@ public:
}
}
+ void set_dequantize_scale(const float scale) override {
+ if(std::is_same<OutputStage, DequantizeFloat>::value) {
+ DequantizeFloat* df = reinterpret_cast<DequantizeFloat *>(&_os);
+ df->scale = scale;
+ }
+ }
+
void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
assert(string_len == _Ksize);
_indirect_buf = ptr;
@@ -1249,4 +1345,10 @@ using GemmInterleavedPretransposedNoMergeQuantizedInline = GemmInterleaved<strat
template<typename strategy, typename To, typename Tr>
using GemmInterleavedQuantized = GemmInterleaved<strategy, To, Tr, Requantize32>;
+template<typename strategy, typename To, typename Tr>
+using GemmInterleavedNoMergeDequantized = GemmInterleaved<strategy, To, Tr, DequantizeFloat, false>;
+
+template<typename strategy, typename To, typename Tr>
+using GemmInterleavedDequantized = GemmInterleaved<strategy, To, Tr, DequantizeFloat>;
+
} // namespace arm_gemm
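The kernel_and_merge<true, false, DequantizeFloat> specialization above runs the plain s8->s32 kernel and then hands the int32 panel to dequantize_block_32. A minimal scalar outline of that merge, assuming a per-tensor scale and an optional per-column float bias (the helper below is illustrative, not the implementation in quantized.cpp):

#include <algorithm>
#include <cstdint>

// Scalar sketch of the dequantizing merge: each int32 accumulator in the C
// panel is scaled to float, given its column bias, optionally added to the
// existing output, and clamped with the activation bounds.
static void dequantize_block_sketch(float scale, unsigned width, unsigned height,
                                    const int32_t *in, unsigned in_stride,
                                    float *out, unsigned out_stride,
                                    const float *bias, bool accumulate,
                                    float act_min, float act_max)
{
    for (unsigned r = 0; r < height; r++) {
        for (unsigned c = 0; c < width; c++) {
            float v = static_cast<float>(in[r * in_stride + c]) * scale;
            if (bias)       { v += bias[c]; }
            if (accumulate) { v += out[r * out_stride + c]; }
            out[r * out_stride + c] = std::min(std::max(v, act_min), act_max);
        }
    }
}

The bias_pass change earlier in this file follows from the same data flow: in the nomerge DequantizeFloat path the float bias can only be applied once the final K block has been accumulated, so it is forwarded on the last pass rather than the first.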
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
new file mode 100644
index 0000000000..782399df8c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/gemm_s8fp32.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef __aarch64__
+
+#include "arm_gemm.hpp"
+
+#include "kernels/a64_gemm_s16_8x12.hpp"
+#include "kernels/a64_gemm_s8_8x12.hpp"
+#include "kernels/a64_gemm_s8_4x4.hpp"
+#include "kernels/a64_interleaved_s8s32_mmla_8x12.hpp"
+
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
+#include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
+
+#include "gemm_implementation.hpp"
+#include "gemm_interleaved.hpp"
+#include "utils.hpp"
+
+#include <cstdint>
+#include <vector>
+namespace arm_gemm {
+
+static const GemmImplementation<int8_t, float, DequantizeFloat> gemm_s8fp32_methods[] =
+{
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { const auto VL = sme::get_vector_length<float>();
+ return args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ [](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL, int8_t, float>(args, dq); }
+},
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_s8qfp32_mopa_4Vx1VL.hpp",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+ [](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL, int8_t, float>(args, dq); }
+},
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_s8qfp32_mopa_2Vx2VL.hpp",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sme2(); },
+ nullptr,
+ [](const GemmArgs &args, const DequantizeFloat &dq) { return new GemmInterleavedNoMergeDequantized<cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL, int8_t, float>(args, dq); }
+},
+#endif // ARM_COMPUTE_ENABLE_SME2
+GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_s8s32_mmla_8x3VL",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_svei8mm(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, float>(args, qp); }
+),
+GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_s8s32_dot_8x3VL",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_sve(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, float>(args, qp); }
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_interleaved_s8s32_mmla_8x12",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_i8mm(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, float>(args, qp); }
+),
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_gemm_s16_8x12",
+ nullptr,
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s16_8x12, int8_t, float>(args, qp); }
+},
+GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_gemm_s8_8x12",
+ [](const GemmArgs &args, const DequantizeFloat &) { return args._ci->has_dotprod(); },
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_8x12, int8_t, float>(args, qp); }
+),
+GemmImplementation<int8_t, float, DequantizeFloat>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_gemm_s8_4x4",
+ nullptr,
+ [](const GemmArgs &args, const DequantizeFloat &) { return GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, float>::estimate_cycles<int8_t>(args); },
+ [](const GemmArgs &args, const DequantizeFloat &qp) { return new GemmInterleavedDequantized<cls_a64_gemm_s8_4x4, int8_t, float>(args, qp); }
+),
+{
+ GemmMethod::DEFAULT,
+ "",
+ nullptr,
+ nullptr,
+ nullptr
+}
+};
+
+template<>
+const GemmImplementation<int8_t, float, DequantizeFloat> *gemm_implementation_list<int8_t, float, DequantizeFloat>() {
+ return gemm_s8fp32_methods;
+}
+
+template UniqueGemmCommon<int8_t, float> gemm<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template KernelDescription get_gemm_method<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+template std::vector<KernelDescription> get_compatible_kernels<int8_t, float, DequantizeFloat>(const GemmArgs &args, const DequantizeFloat &os);
+
+} // namespace arm_gemm
+
+#endif // __aarch64__
\ No newline at end of file
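The SME2 entries in gemm_s8fp32_methods choose a tile arrangement from the problem shape relative to the streaming vector length. A small illustration of the 1VLx4VL predicate (VL stands in for sme::get_vector_length<float>(); the helper name is made up):

// True when the short-M, wide-N 1VLx4VL kernel is preferred: M fits in a
// single vector of rows, or spills just into a third vector where the
// 1-high arrangement wastes less work than 2VLx2VL would.
static bool prefers_1VLx4VL(unsigned M, unsigned VL)
{
    return M <= VL || (2 * VL < M && M <= 3 * VL);
}

With a 512-bit streaming vector length VL is 16, so for example M = 8 or M = 40 passes this test; the mirrored test on N selects 4VLx1VL, and 2VLx2VL is the unconditional fallback among the SME2 entries.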
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp
new file mode 100644
index 0000000000..7792192856
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL
+{
+public:
+ typedef int8_t operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<int32_t>() * 1;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<int32_t>() * 4;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL;
+
+ StdTransformsSME<operand_type, result_type, 1, 4, 4> transforms = {};
+
+ cls_sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
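To put numbers on the blocking parameters above, assume a 512-bit streaming vector length (illustrative only; implementations may use other lengths):

// sme::get_vector_length<int32_t>() = 512 / 32 = 16 int32 lanes per vector, so
//   out_height() = 16 * 1 = 16 rows
//   out_width()  = 16 * 4 = 64 columns
// i.e. one output block spans the four 16x16 ZA tiles arranged 1 high by 4 wide.
// k_unroll() == 4 because each SMOPA outer product consumes four int8 values
// per 32-bit accumulator lane, so K is processed in multiples of four.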
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
new file mode 100644
index 0000000000..4b26a6578c
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL/generic.cpp
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8qfp32_mopa_1VLx4VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const int8_t *const A,
+ const int8_t *const B,
+ float *const C, const int ldc,
+ const int M, const int N, const int K,
+ const int32_t *const bias, const float *const late_bias, const Activation act,
+ bool accumulate,
+ int32_t *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+ C(C), ldcb(ldc * sizeof(float)),
+ M(M), N(N), K(K),
+ min(-std::numeric_limits<float>::infinity()),
+ max(std::numeric_limits<float>::infinity()),
+ bias(bias), late_bias(late_bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<float>(0);
+ break;
+ }
+ }
+
+ const int8_t *const A;
+ const int8_t *const B;
+ const long kstride_bytes;
+ float *const C;
+ const long ldcb;
+ const long M, N, K;
+ float min = -std::numeric_limits<float>::infinity();
+ float max = std::numeric_limits<float>::infinity();
+
+ const int32_t *const bias;
+ const float *const late_bias;
+
+ int32_t *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+
+ __asm__ __volatile__(
+ "ldr x13, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p0.b\n"
+ ".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x11, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x10, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x13, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xc0840583 // mova za3h.s[x12], { z12.s-z15.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w9, [%x[args], %[offsetof_M]]\n"
+ "mov x28, #0x0\n"
+ "mov x27, #0x0\n"
+ "ldr w26, [%x[args], %[offsetof_N]]\n"
+ "ldr x25, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x24, x25\n"
+ ".inst 0x25ba6770 // whilelt pn8.s, x27, x26, VLx4\n"
+ "tbnz x13, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ ".inst 0xa01bc288 // ld1w { z8.s-z11.s }, p8/Z, [x20, x27, LSL #2]\n"
+ ".inst 0xc0900100 // addha za0.s, p0/M, p0/M, z8.s\n"
+ ".inst 0xc0900121 // addha za1.s, p0/M, p0/M, z9.s\n"
+ ".inst 0xc0900142 // addha za2.s, p0/M, p0/M, z10.s\n"
+ ".inst 0xc0900163 // addha za3.s, p0/M, p0/M, z11.s\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x27\n"
+ "mov x21, x28\n"
+ "incw x20, ALL, MUL #4\n"
+ "incw x21\n"
+ "cmp x20, x26\n"
+ "mov x20, x13\n"
+ "csel x21, x28, x21, LT\n"
+ "bfm x13, XZR, #0x0, #0x0 // bfc x13, #0x0, #0x1\n"
+ "cmp x21, x9\n"
+ "csel x13, x20, x13, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x3\n"
+ "lsr x20, x20, #0x2\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x27, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ "ld1b { z31.b }, p0/Z, [x24]\n"
+ ".inst 0xa04086e8 // ld1b { z8.b-z11.b }, pn9.b/Z, [x23]\n"
+ "ld1b { z1.b }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa04186e4 // ld1b { z4.b-z7.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ "ld1b { z0.b }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286ec // ld1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ "ld1b { z3.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386f0 // ld1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0xa08803e0 // smopa za0.s, p0/M, p0/M, z31.b, z8.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa08903e1 // smopa za1.s, p0/M, p0/M, z31.b, z9.b\n"
+ ".inst 0xa08a03e2 // smopa za2.s, p0/M, p0/M, z31.b, z10.b\n"
+ ".inst 0xa08b03e3 // smopa za3.s, p0/M, p0/M, z31.b, z11.b\n"
+ "ld1b { z31.b }, p0/Z, [x24]\n"
+ ".inst 0xa0840020 // smopa za0.s, p0/M, p0/M, z1.b, z4.b\n"
+ ".inst 0xa04086e8 // ld1b { z8.b-z11.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa0850021 // smopa za1.s, p0/M, p0/M, z1.b, z5.b\n"
+ ".inst 0xa0860022 // smopa za2.s, p0/M, p0/M, z1.b, z6.b\n"
+ ".inst 0xa0870023 // smopa za3.s, p0/M, p0/M, z1.b, z7.b\n"
+ "ld1b { z1.b }, p0/Z, [x24, #1, MUL VL]\n"
+ ".inst 0xa08c0000 // smopa za0.s, p0/M, p0/M, z0.b, z12.b\n"
+ ".inst 0xa04186e4 // ld1b { z4.b-z7.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa08d0001 // smopa za1.s, p0/M, p0/M, z0.b, z13.b\n"
+ ".inst 0xa08e0002 // smopa za2.s, p0/M, p0/M, z0.b, z14.b\n"
+ ".inst 0xa08f0003 // smopa za3.s, p0/M, p0/M, z0.b, z15.b\n"
+ "ld1b { z0.b }, p0/Z, [x24, #2, MUL VL]\n"
+ ".inst 0xa04286ec // ld1b { z12.b-z15.b }, pn9.b/Z, [x23, #0x8, MUL VL]\n"
+ ".inst 0xa0900060 // smopa za0.s, p0/M, p0/M, z3.b, z16.b\n"
+ ".inst 0xa0910061 // smopa za1.s, p0/M, p0/M, z3.b, z17.b\n"
+ ".inst 0xa0920062 // smopa za2.s, p0/M, p0/M, z3.b, z18.b\n"
+ ".inst 0xa0930063 // smopa za3.s, p0/M, p0/M, z3.b, z19.b\n"
+ "ld1b { z3.b }, p0/Z, [x24, #3, MUL VL]\n"
+ "addvl x24, x24, #4\n"
+ ".inst 0xa04386f0 // ld1b { z16.b-z19.b }, pn9.b/Z, [x23, #0xc, MUL VL]\n"
+ "addvl x23, x23, #16\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0xa08803e0 // smopa za0.s, p0/M, p0/M, z31.b, z8.b\n"
+ ".inst 0xa08903e1 // smopa za1.s, p0/M, p0/M, z31.b, z9.b\n"
+ ".inst 0xa08a03e2 // smopa za2.s, p0/M, p0/M, z31.b, z10.b\n"
+ ".inst 0xa08b03e3 // smopa za3.s, p0/M, p0/M, z31.b, z11.b\n"
+ ".inst 0xa0840020 // smopa za0.s, p0/M, p0/M, z1.b, z4.b\n"
+ ".inst 0xa0850021 // smopa za1.s, p0/M, p0/M, z1.b, z5.b\n"
+ ".inst 0xa0860022 // smopa za2.s, p0/M, p0/M, z1.b, z6.b\n"
+ ".inst 0xa0870023 // smopa za3.s, p0/M, p0/M, z1.b, z7.b\n"
+ ".inst 0xa08c0000 // smopa za0.s, p0/M, p0/M, z0.b, z12.b\n"
+ ".inst 0xa08d0001 // smopa za1.s, p0/M, p0/M, z0.b, z13.b\n"
+ ".inst 0xa08e0002 // smopa za2.s, p0/M, p0/M, z0.b, z14.b\n"
+ ".inst 0xa08f0003 // smopa za3.s, p0/M, p0/M, z0.b, z15.b\n"
+ ".inst 0xa0900060 // smopa za0.s, p0/M, p0/M, z3.b, z16.b\n"
+ ".inst 0xa0910061 // smopa za1.s, p0/M, p0/M, z3.b, z17.b\n"
+ ".inst 0xa0920062 // smopa za2.s, p0/M, p0/M, z3.b, z18.b\n"
+ ".inst 0xa0930063 // smopa za3.s, p0/M, p0/M, z3.b, z19.b\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ "ld1b { z18.b }, p0/Z, [x24]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x24, x24, #1\n"
+ ".inst 0xa04086fc // ld1b { z28.b-z31.b }, pn9.b/Z, [x23]\n"
+ "addvl x23, x23, #4\n"
+ ".inst 0xa09c0240 // smopa za0.s, p0/M, p0/M, z18.b, z28.b\n"
+ ".inst 0xa09d0241 // smopa za1.s, p0/M, p0/M, z18.b, z29.b\n"
+ ".inst 0xa09e0242 // smopa za2.s, p0/M, p0/M, z18.b, z30.b\n"
+ ".inst 0xa09f0243 // smopa za3.s, p0/M, p0/M, z18.b, z31.b\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x13, #1, 14f\n"
+ "tbz x13, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xa041c57c // ld1w { z28.s-z31.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa042c578 // ld1w { z24.s-z27.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840400 // mova za0h.s[x12], { z0.s-z3.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa060c548 // st1w { z8.s-z11.s }, pn9.b, [x10]\n"
+ ".inst 0xc0840702 // mova za2h.s[x12], { z24.s-z27.s }\n"
+ ".inst 0xa061c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c544 // st1w { z4.s-z7.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ "blt 11b\n"
+ "b 21f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xa060c544 // st1w { z4.s-z7.s }, pn9.b, [x10]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c550 // st1w { z16.s-z19.s }, pn9.b, [x10, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c548 // st1w { z8.s-z11.s }, pn9.b, [x10, #0x8, MUL VL]\n"
+ ".inst 0xa063c54c // st1w { z12.s-z15.s }, pn9.b, [x10, #0xc, MUL VL]\n"
+ "addvl x10, x10, #16\n"
+ "blt 13b\n"
+ "b 21f\n"
+ "14:" // Store to output array
+ "ldr x23, [%x[args], %[offsetof_C]]\n"
+ "sub x21, x9, x28\n"
+ "ld1rw { z18.s }, p0/Z, [%x[dq], %[offset_DequantizeFloat_scale]]\n"
+ "fmov z20.s, #0x0\n"
+ "ldr x22, [%x[args], %[offsetof_ldcb]]\n"
+ "fmov z21.s, #0x0\n"
+ "fmov z22.s, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_late_bias]]\n"
+ "fmov z23.s, #0x0\n"
+ "add x23, x23, x27, LSL #2\n" // C += n
+ "madd x23, x28, x22, x23\n" // C += m * ldc
+ "cbz x20, 15f\n"
+ "add x20, x20, x27, LSL #2\n"
+ ".inst 0xa040c294 // ld1w { z20.s-z23.s }, p8/Z, [x20]\n"
+ "15:" // Store to output array: no late bias
+ "cntw x20\n"
+ "ld1rw { z17.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x21, x20\n"
+ "ld1rw { z16.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "csel x20, x21, x20, LT\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "cbz x21, 17f\n"
+ "16:" // Store to output array: Accumulator row 0 loop
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xc132e000 // scvtf { z0.s-z3.s }, { z0.s-z3.s }\n"
+ ".inst 0xc132e084 // scvtf { z4.s-z7.s }, { z4.s-z7.s }\n"
+ ".inst 0xc132e108 // scvtf { z8.s-z11.s }, { z8.s-z11.s }\n"
+ ".inst 0xc132e18c // scvtf { z12.s-z15.s }, { z12.s-z15.s }\n"
+ "fmad z0.s, p0/M, z18.s, z20.s\n"
+ "fmad z1.s, p0/M, z18.s, z20.s\n"
+ "fmad z2.s, p0/M, z18.s, z20.s\n"
+ "fmad z3.s, p0/M, z18.s, z20.s\n"
+ "add x12, x12, #0x4\n"
+ "fmad z4.s, p0/M, z18.s, z21.s\n"
+ "fmad z5.s, p0/M, z18.s, z21.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z6.s, p0/M, z18.s, z21.s\n"
+ "fmad z7.s, p0/M, z18.s, z21.s\n"
+ "fmad z8.s, p0/M, z18.s, z22.s\n"
+ "fmad z9.s, p0/M, z18.s, z22.s\n"
+ "fmad z10.s, p0/M, z18.s, z22.s\n"
+ "fmad z11.s, p0/M, z18.s, z22.s\n"
+ "fmad z12.s, p0/M, z18.s, z23.s\n"
+ "fmad z13.s, p0/M, z18.s, z23.s\n"
+ "fmad z14.s, p0/M, z18.s, z23.s\n"
+ "fmad z15.s, p0/M, z18.s, z23.s\n"
+ ".inst 0xc1b0ca20 // fclamp { z0.s-z3.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
+ ".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ ".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ ".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ ".inst 0xa160c2e3 // st1w { z3.s, z7.s, z11.s, z15.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ "blt 16b\n"
+ "17:" // Store to output array: Accumulator row 0 oddments
+ "cbz x20, 18f\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xc0860448 // mova { z8.s-z11.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xc132e000 // scvtf { z0.s-z3.s }, { z0.s-z3.s }\n"
+ ".inst 0xc132e084 // scvtf { z4.s-z7.s }, { z4.s-z7.s }\n"
+ ".inst 0xc132e108 // scvtf { z8.s-z11.s }, { z8.s-z11.s }\n"
+ ".inst 0xc132e18c // scvtf { z12.s-z15.s }, { z12.s-z15.s }\n"
+ "fmad z0.s, p0/M, z18.s, z20.s\n"
+ "fmad z1.s, p0/M, z18.s, z20.s\n"
+ "fmad z2.s, p0/M, z18.s, z20.s\n"
+ "fmad z3.s, p0/M, z18.s, z20.s\n"
+ "subs x20, x20, #0x1\n"
+ "fmad z4.s, p0/M, z18.s, z21.s\n"
+ "fmad z5.s, p0/M, z18.s, z21.s\n"
+ "fmad z6.s, p0/M, z18.s, z21.s\n"
+ "fmad z7.s, p0/M, z18.s, z21.s\n"
+ "fmad z8.s, p0/M, z18.s, z22.s\n"
+ "fmad z9.s, p0/M, z18.s, z22.s\n"
+ "fmad z10.s, p0/M, z18.s, z22.s\n"
+ "fmad z11.s, p0/M, z18.s, z22.s\n"
+ "fmad z12.s, p0/M, z18.s, z23.s\n"
+ "fmad z13.s, p0/M, z18.s, z23.s\n"
+ "fmad z14.s, p0/M, z18.s, z23.s\n"
+ "fmad z15.s, p0/M, z18.s, z23.s\n"
+ ".inst 0xc1b0ca20 // fclamp { z0.s-z3.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca24 // fclamp { z4.s-z7.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca28 // fclamp { z8.s-z11.s }, z17.s, z16.s\n"
+ ".inst 0xc1b0ca2c // fclamp { z12.s-z15.s }, z17.s, z16.s\n"
+ ".inst 0xa160c2e0 // st1w { z0.s, z4.s, z8.s, z12.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ "beq 18f\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa160c2e1 // st1w { z1.s, z5.s, z9.s, z13.s }, p8, [x23]\n"
+ "add x23, x23, x22\n"
+ "beq 18f\n"
+ ".inst 0xa160c2e2 // st1w { z2.s, z6.s, z10.s, z14.s }, p8, [x23]\n"
+ "18:" // Store to output array: Accumulator row 0 oddments: End
+ "19:" // Store to output array: End
+ "tbz x13, #0, 21f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "20:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c574 // ld1w { z20.s-z23.s }, pn9.b/Z, [x11]\n"
+ ".inst 0xa041c56c // ld1w { z12.s-z15.s }, pn9.b/Z, [x11, #0x4, MUL VL]\n"
+ ".inst 0xa042c560 // ld1w { z0.s-z3.s }, pn9.b/Z, [x11, #0x8, MUL VL]\n"
+ ".inst 0xa043c568 // ld1w { z8.s-z11.s }, pn9.b/Z, [x11, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x11, x11, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 20b\n"
+ "21:" // End block
+ "incw x27, ALL, MUL #4\n"
+ "cmp x27, x26\n"
+ "blt 3b\n"
+ "incw x28\n"
+ "mov x27, #0x0\n"
+ "cmp x28, x9\n"
+ "mov x25, x24\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [dq] "r" (&dq), [offset_DequantizeFloat_scale] "I" (offsetof(DequantizeFloat, scale)), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_late_bias] "I" (offsetof(KernelArgs, late_bias)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
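The "Store to output array" section of the kernel above (labels 14 to 18) is where DequantizeFloat is applied: accumulator rows are moved out of ZA, converted with scvtf, combined with the scale (z18) and the late bias (z20-z23) by fmad, clamped with fclamp, and stored. Per element this is equivalent to the scalar sketch below (names are illustrative):

#include <algorithm>
#include <cstdint>

// Scalar equivalent of the scvtf / fmad / fclamp sequence in the store path.
static inline float dequantize_lane(int32_t acc, float scale, float late_bias,
                                    float act_min, float act_max)
{
    float v = static_cast<float>(acc) * scale + late_bias; // scvtf + fmad
    return std::min(std::max(v, act_min), act_max);        // fclamp
}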
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp
new file mode 100644
index 0000000000..df2c9c0ca3
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL
+{
+public:
+ typedef int8_t operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<int32_t>() * 2;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<int32_t>() * 2;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL;
+
+ StdTransformsSME<operand_type, result_type, 2, 2, 4> transforms = {};
+
+ cls_sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
new file mode 100644
index 0000000000..1631fae8e9
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL/generic.cpp
@@ -0,0 +1,448 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8qfp32_mopa_2VLx2VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const int8_t *const A,
+ const int8_t *const B,
+ float *const C, const int ldc,
+ const int M, const int N, const int K,
+ const int32_t *const bias, const float *const late_bias, const Activation act,
+ bool accumulate,
+ int32_t *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+ C(C), ldcb(ldc * sizeof(float)),
+ M(M), N(N), K(K),
+ min(-std::numeric_limits<float>::infinity()),
+ max(std::numeric_limits<float>::infinity()),
+ bias(bias), late_bias(late_bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<float>(0);
+ break;
+ }
+ }
+
+ const int8_t *const A;
+ const int8_t *const B;
+ const long kstride_bytes;
+ float *const C;
+ const long ldcb;
+ const long M, N, K;
+ float min = -std::numeric_limits<float>::infinity();
+ float max = std::numeric_limits<float>::infinity();
+
+ const int32_t *const bias;
+ const float *const late_bias;
+
+ int32_t *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+
+ __asm__ __volatile__(
+ "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p0.b\n"
+ ".inst 0x25207811 // ptrue pn9.b\n"
+ "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x16, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e0 // ld1w { z0.s-z3.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f8 // ld1w { z24.s-z27.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840681 // mova za1h.s[x12], { z20.s-z23.s }\n"
+ ".inst 0xc0840402 // mova za2h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840703 // mova za3h.s[x12], { z24.s-z27.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w13, [%x[args], %[offsetof_M]]\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_N]]\n"
+ "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x27, x28\n"
+ ".inst 0x25a94550 // whilelt pn8.s, x10, x9, VLx2\n"
+ "tbnz x16, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ ".inst 0xa10a4286 // ld1w { z6.s, z14.s }, p8/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc09000c0 // addha za0.s, p0/M, p0/M, z6.s\n"
+ ".inst 0xc09001c1 // addha za1.s, p0/M, p0/M, z14.s\n"
+ ".inst 0xc09000c2 // addha za2.s, p0/M, p0/M, z6.s\n"
+ ".inst 0xc09001c3 // addha za3.s, p0/M, p0/M, z14.s\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x10\n"
+ "mov x21, x11\n"
+ "incw x20, ALL, MUL #2\n"
+ "incw x21, ALL, MUL #2\n"
+ "cmp x20, x9\n"
+ "mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
+ "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
+ "cmp x21, x13\n"
+ "csel x16, x20, x16, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x3\n"
+ "lsr x20, x20, #0x2\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa1400775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa04006f2 // ld1b { z18.b-z19.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa041076a // ld1b { z10.b-z11.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa14106e5 // ld1b { z5.b, z13.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa1420767 // ld1b { z7.b, z15.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206f0 // ld1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa1430774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ ".inst 0xa14306f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0xa09202a0 // smopa za0.s, p0/M, p0/M, z21.b, z18.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa09302a1 // smopa za1.s, p0/M, p0/M, z21.b, z19.b\n"
+ ".inst 0xa09203a2 // smopa za2.s, p0/M, p0/M, z29.b, z18.b\n"
+ ".inst 0xa09303a3 // smopa za3.s, p0/M, p0/M, z29.b, z19.b\n"
+ ".inst 0xa1400775 // ld1b { z21.b, z29.b }, pn9.b/Z, [x27]\n"
+ ".inst 0xa0850140 // smopa za0.s, p0/M, p0/M, z10.b, z5.b\n"
+ ".inst 0xa04006f2 // ld1b { z18.b-z19.b }, pn9.b/Z, [x23]\n"
+ ".inst 0xa08d0141 // smopa za1.s, p0/M, p0/M, z10.b, z13.b\n"
+ ".inst 0xa0850162 // smopa za2.s, p0/M, p0/M, z11.b, z5.b\n"
+ ".inst 0xa08d0163 // smopa za3.s, p0/M, p0/M, z11.b, z13.b\n"
+ ".inst 0xa041076a // ld1b { z10.b-z11.b }, pn9.b/Z, [x27, #0x2, MUL VL]\n"
+ ".inst 0xa09000e0 // smopa za0.s, p0/M, p0/M, z7.b, z16.b\n"
+ ".inst 0xa14106e5 // ld1b { z5.b, z13.b }, pn9.b/Z, [x23, #0x2, MUL VL]\n"
+ ".inst 0xa09800e1 // smopa za1.s, p0/M, p0/M, z7.b, z24.b\n"
+ ".inst 0xa09001e2 // smopa za2.s, p0/M, p0/M, z15.b, z16.b\n"
+ ".inst 0xa09801e3 // smopa za3.s, p0/M, p0/M, z15.b, z24.b\n"
+ ".inst 0xa1420767 // ld1b { z7.b, z15.b }, pn9.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa14206f0 // ld1b { z16.b, z24.b }, pn9.b/Z, [x23, #0x4, MUL VL]\n"
+ ".inst 0xa0970280 // smopa za0.s, p0/M, p0/M, z20.b, z23.b\n"
+ ".inst 0xa09f0281 // smopa za1.s, p0/M, p0/M, z20.b, z31.b\n"
+ ".inst 0xa0970382 // smopa za2.s, p0/M, p0/M, z28.b, z23.b\n"
+ ".inst 0xa09f0383 // smopa za3.s, p0/M, p0/M, z28.b, z31.b\n"
+ ".inst 0xa1430774 // ld1b { z20.b, z28.b }, pn9.b/Z, [x27, #0x6, MUL VL]\n"
+ "addvl x27, x27, #8\n"
+ ".inst 0xa14306f7 // ld1b { z23.b, z31.b }, pn9.b/Z, [x23, #0x6, MUL VL]\n"
+ "addvl x23, x23, #8\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0xa09202a0 // smopa za0.s, p0/M, p0/M, z21.b, z18.b\n"
+ ".inst 0xa09302a1 // smopa za1.s, p0/M, p0/M, z21.b, z19.b\n"
+ ".inst 0xa09203a2 // smopa za2.s, p0/M, p0/M, z29.b, z18.b\n"
+ ".inst 0xa09303a3 // smopa za3.s, p0/M, p0/M, z29.b, z19.b\n"
+ ".inst 0xa0850140 // smopa za0.s, p0/M, p0/M, z10.b, z5.b\n"
+ ".inst 0xa08d0141 // smopa za1.s, p0/M, p0/M, z10.b, z13.b\n"
+ ".inst 0xa0850162 // smopa za2.s, p0/M, p0/M, z11.b, z5.b\n"
+ ".inst 0xa08d0163 // smopa za3.s, p0/M, p0/M, z11.b, z13.b\n"
+ ".inst 0xa09000e0 // smopa za0.s, p0/M, p0/M, z7.b, z16.b\n"
+ ".inst 0xa09800e1 // smopa za1.s, p0/M, p0/M, z7.b, z24.b\n"
+ ".inst 0xa09001e2 // smopa za2.s, p0/M, p0/M, z15.b, z16.b\n"
+ ".inst 0xa09801e3 // smopa za3.s, p0/M, p0/M, z15.b, z24.b\n"
+ ".inst 0xa0970280 // smopa za0.s, p0/M, p0/M, z20.b, z23.b\n"
+ ".inst 0xa09f0281 // smopa za1.s, p0/M, p0/M, z20.b, z31.b\n"
+ ".inst 0xa0970382 // smopa za2.s, p0/M, p0/M, z28.b, z23.b\n"
+ ".inst 0xa09f0383 // smopa za3.s, p0/M, p0/M, z28.b, z31.b\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ ".inst 0xa040077e // ld1b { z30.b-z31.b }, pn9.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x27, x27, #2\n"
+ ".inst 0xa14006e7 // ld1b { z7.b, z15.b }, pn9.b/Z, [x23]\n"
+ "addvl x23, x23, #2\n"
+ ".inst 0xa08703c0 // smopa za0.s, p0/M, p0/M, z30.b, z7.b\n"
+ ".inst 0xa08f03c1 // smopa za1.s, p0/M, p0/M, z30.b, z15.b\n"
+ ".inst 0xa08703e2 // smopa za2.s, p0/M, p0/M, z31.b, z7.b\n"
+ ".inst 0xa08f03e3 // smopa za3.s, p0/M, p0/M, z31.b, z15.b\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x16, #1, 14f\n"
+ "tbz x16, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc0860428 // mova { z8.s-z11.s }, za1h.s[x12]\n"
+ ".inst 0xa041c5f0 // ld1w { z16.s-z19.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860440 // mova { z0.s-z3.s }, za2h.s[x12]\n"
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ ".inst 0xa042c5fc // ld1w { z28.s-z31.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840580 // mova za0h.s[x12], { z12.s-z15.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840601 // mova za1h.s[x12], { z16.s-z19.s }\n"
+ ".inst 0xa060c5c4 // st1w { z4.s-z7.s }, pn9.b, [x14]\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa061c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c5d8 // st1w { z24.s-z27.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 11b\n"
+ "b 24f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ ".inst 0xc0860468 // mova { z8.s-z11.s }, za3h.s[x12]\n"
+ ".inst 0xa060c5c0 // st1w { z0.s-z3.s }, pn9.b, [x14]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c5cc // st1w { z12.s-z15.s }, pn9.b, [x14, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c5d0 // st1w { z16.s-z19.s }, pn9.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c5c8 // st1w { z8.s-z11.s }, pn9.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 13b\n"
+ "b 24f\n"
+ "14:" // Store to output array
+ "ldr x26, [%x[args], %[offsetof_C]]\n"
+ "sub x25, x13, x11\n"
+ "ld1rw { z3.s }, p0/Z, [%x[dq], %[offset_DequantizeFloat_scale]]\n"
+ "fmov z2.s, #0x0\n"
+ "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "fmov z10.s, #0x0\n"
+ "ldr x20, [%x[args], %[offsetof_late_bias]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
+ "madd x26, x11, x24, x26\n" // C += m * ldc
+ "cbz x20, 15f\n"
+ "add x20, x20, x10, LSL #2\n"
+ ".inst 0xa1404282 // ld1w { z2.s, z10.s }, p8/Z, [x20]\n"
+ "15:" // Store to output array: no late bias
+ "cntw x23\n"
+ "ld1rw { z1.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x25, x23\n"
+ "ld1rw { z0.s }, p0/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "csel x22, x25, x23, LT\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 17f\n"
+ "16:" // Store to output array: Accumulator row 0 loop
+ ".inst 0xc0860404 // mova { z4.s-z7.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc132e084 // scvtf { z4.s-z7.s }, { z4.s-z7.s }\n"
+ ".inst 0xc132e18c // scvtf { z12.s-z15.s }, { z12.s-z15.s }\n"
+ "fmad z4.s, p0/M, z3.s, z2.s\n"
+ "fmad z5.s, p0/M, z3.s, z2.s\n"
+ "add x12, x12, #0x4\n"
+ "fmad z6.s, p0/M, z3.s, z2.s\n"
+ "fmad z7.s, p0/M, z3.s, z2.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z12.s, p0/M, z3.s, z10.s\n"
+ "fmad z13.s, p0/M, z3.s, z10.s\n"
+ "fmad z14.s, p0/M, z3.s, z10.s\n"
+ "fmad z15.s, p0/M, z3.s, z10.s\n"
+ ".inst 0xc1a0c824 // fclamp { z4.s-z7.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c82c // fclamp { z12.s-z15.s }, z1.s, z0.s\n"
+ ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604347 // st1w { z7.s, z15.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 16b\n"
+ "17:" // Store to output array: Accumulator row 0 oddments
+ "cbz x20, 18f\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ ".inst 0xc0860438 // mova { z24.s-z27.s }, za1h.s[x12]\n"
+ ".inst 0xc132e210 // scvtf { z16.s-z19.s }, { z16.s-z19.s }\n"
+ ".inst 0xc132e318 // scvtf { z24.s-z27.s }, { z24.s-z27.s }\n"
+ "fmad z16.s, p0/M, z3.s, z2.s\n"
+ "fmad z17.s, p0/M, z3.s, z2.s\n"
+ "subs x20, x20, #0x1\n"
+ "fmad z18.s, p0/M, z3.s, z2.s\n"
+ "fmad z19.s, p0/M, z3.s, z2.s\n"
+ "fmad z24.s, p0/M, z3.s, z10.s\n"
+ "fmad z25.s, p0/M, z3.s, z10.s\n"
+ "fmad z26.s, p0/M, z3.s, z10.s\n"
+ "fmad z27.s, p0/M, z3.s, z10.s\n"
+ ".inst 0xc1a0c830 // fclamp { z16.s-z19.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c838 // fclamp { z24.s-z27.s }, z1.s, z0.s\n"
+ ".inst 0xa1604350 // st1w { z16.s, z24.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 18f\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa1604351 // st1w { z17.s, z25.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 18f\n"
+ ".inst 0xa1604352 // st1w { z18.s, z26.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "18:" // Store to output array: Accumulator row 0 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 22f\n"
+ "cmp x25, x23\n"
+ "mov x12, #0x0\n"
+ "csel x20, x25, x23, LT\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "cbz x21, 20f\n"
+ "19:" // Store to output array: Accumulator row 1 loop
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc086047c // mova { z28.s-z31.s }, za3h.s[x12]\n"
+ ".inst 0xc132e294 // scvtf { z20.s-z23.s }, { z20.s-z23.s }\n"
+ ".inst 0xc132e39c // scvtf { z28.s-z31.s }, { z28.s-z31.s }\n"
+ "fmad z20.s, p0/M, z3.s, z2.s\n"
+ "fmad z21.s, p0/M, z3.s, z2.s\n"
+ "add x12, x12, #0x4\n"
+ "fmad z22.s, p0/M, z3.s, z2.s\n"
+ "fmad z23.s, p0/M, z3.s, z2.s\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z28.s, p0/M, z3.s, z10.s\n"
+ "fmad z29.s, p0/M, z3.s, z10.s\n"
+ "fmad z30.s, p0/M, z3.s, z10.s\n"
+ "fmad z31.s, p0/M, z3.s, z10.s\n"
+ ".inst 0xc1a0c834 // fclamp { z20.s-z23.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c83c // fclamp { z28.s-z31.s }, z1.s, z0.s\n"
+ ".inst 0xa1604354 // st1w { z20.s, z28.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604355 // st1w { z21.s, z29.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604356 // st1w { z22.s, z30.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ ".inst 0xa1604357 // st1w { z23.s, z31.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 19b\n"
+ "20:" // Store to output array: Accumulator row 1 oddments
+ "cbz x20, 21f\n"
+ ".inst 0xc0860444 // mova { z4.s-z7.s }, za2h.s[x12]\n"
+ ".inst 0xc086046c // mova { z12.s-z15.s }, za3h.s[x12]\n"
+ ".inst 0xc132e084 // scvtf { z4.s-z7.s }, { z4.s-z7.s }\n"
+ ".inst 0xc132e18c // scvtf { z12.s-z15.s }, { z12.s-z15.s }\n"
+ "fmad z4.s, p0/M, z3.s, z2.s\n"
+ "fmad z5.s, p0/M, z3.s, z2.s\n"
+ "subs x20, x20, #0x1\n"
+ "fmad z6.s, p0/M, z3.s, z2.s\n"
+ "fmad z7.s, p0/M, z3.s, z2.s\n"
+ "fmad z12.s, p0/M, z3.s, z10.s\n"
+ "fmad z13.s, p0/M, z3.s, z10.s\n"
+ "fmad z14.s, p0/M, z3.s, z10.s\n"
+ "fmad z15.s, p0/M, z3.s, z10.s\n"
+ ".inst 0xc1a0c824 // fclamp { z4.s-z7.s }, z1.s, z0.s\n"
+ ".inst 0xc1a0c82c // fclamp { z12.s-z15.s }, z1.s, z0.s\n"
+ ".inst 0xa1604344 // st1w { z4.s, z12.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 21f\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xa1604345 // st1w { z5.s, z13.s }, p8, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 21f\n"
+ ".inst 0xa1604346 // st1w { z6.s, z14.s }, p8, [x26]\n"
+ "21:" // Store to output array: Accumulator row 1 oddments: End
+ "22:" // Store to output array: End
+ "tbz x16, #0, 24f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "23:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c5f4 // ld1w { z20.s-z23.s }, pn9.b/Z, [x15]\n"
+ ".inst 0xa041c5ec // ld1w { z12.s-z15.s }, pn9.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c5e4 // ld1w { z4.s-z7.s }, pn9.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c5e8 // ld1w { z8.s-z11.s }, pn9.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840482 // mova za2h.s[x12], { z4.s-z7.s }\n"
+ ".inst 0xc0840503 // mova za3h.s[x12], { z8.s-z11.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 23b\n"
+ "24:" // End block
+ "incw x10, ALL, MUL #2\n"
+ "cmp x10, x9\n"
+ "blt 3b\n"
+ "incw x11, ALL, MUL #2\n"
+ "mov x10, #0x0\n"
+ "cmp x11, x13\n"
+ "mov x28, x27\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [dq] "r" (&dq), [offset_DequantizeFloat_scale] "I" (offsetof(DequantizeFloat, scale)), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_late_bias] "I" (offsetof(KernelArgs, late_bias)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
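
For reference, the dequantizing epilogue used by these SME2 kernels (the scvtf, fmad and fclamp sequence in the store-to-output blocks above) is equivalent, per accumulator element, to the scalar sketch below. This is an illustration only; the function name and signature are not part of the patch.

#include <algorithm>
#include <cstdint>

// Scalar model of the SME2 DequantizeFloat epilogue: convert the s32
// accumulator to float, scale by dq.scale, add the optional per-column late
// bias and clamp to the activation bounds.
static inline float dequantize_accumulator(int32_t acc, float scale, float late_bias,
                                           float min_val, float max_val)
{
    const float v = static_cast<float>(acc) * scale + late_bias; // scvtf + fmad
    return std::min(std::max(v, min_val), max_val);              // fclamp
}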
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp
new file mode 100644
index 0000000000..70952f4f03
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL.hpp
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include <cstdint>
+#include "../std_transforms_sme.hpp"
+
+namespace arm_gemm
+{
+
+// Implementations
+void sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+class cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL
+{
+public:
+ typedef int8_t operand_type;
+ typedef float result_type;
+
+ typedef void (*kern_type)(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer);
+
+ /* Kernel blocking parameters */
+ static unsigned int out_height()
+ {
+ return sme::get_vector_length<int32_t>() * 4;
+ }
+
+ static unsigned int out_width()
+ {
+ return sme::get_vector_length<int32_t>() * 1;
+ }
+
+ static constexpr unsigned int k_unroll()
+ {
+ return 4;
+ }
+
+ static constexpr bool supports_accumulate()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_bias()
+ {
+ return true;
+ }
+
+ static constexpr bool supports_activation()
+ {
+ return true;
+ }
+
+ static constexpr bool is_sme()
+ {
+ return true;
+ }
+
+ // Default to the generic kernel
+ kern_type kernel = sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL;
+
+ StdTransformsSME<operand_type, result_type, 4, 1, 4> transforms = {};
+
+ cls_sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const CPUInfo *)
+ {
+ }
+};
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
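
The blocking parameters above fix the output tile computed per kernel invocation: four vector lengths of rows by one vector length of columns, with K consumed four bytes at a time. The snippet below is a hypothetical illustration assuming a 512-bit streaming vector length, in which case sme::get_vector_length<int32_t>() would return 16; it is not part of the patch.

#include <cstdio>

// Assumed stand-in for sme::get_vector_length<int32_t>() on a hypothetical
// 512-bit SME implementation.
static unsigned int vector_length_s32() { return 512 / 32; }

int main()
{
    const unsigned int out_height = vector_length_s32() * 4; // 4VL rows of C
    const unsigned int out_width  = vector_length_s32() * 1; // 1VL columns of C
    std::printf("output tile: %u x %u, k_unroll = 4\n", out_height, out_width);
    return 0;
}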
diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
new file mode 100644
index 0000000000..bafb16bca8
--- /dev/null
+++ b/src/core/NEON/kernels/arm_gemm/kernels/sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL/generic.cpp
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) 2024 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ARM_COMPUTE_ENABLE_SME2
+
+#include "arm_gemm.hpp"
+
+#include <cstdint>
+#include "../../asmlib.hpp"
+#include "../../utils.hpp"
+
+namespace arm_gemm {
+
+void sme2_interleaved_nomerge_s8qfp32_mopa_4VLx1VL(const int8_t *const A, const int8_t *const B, float *const C, int ldc, const int M, const int N, const int K, const int32_t *const bias, const DequantizeFloat &dq, const float *const late_bias, const Activation act, bool accumulate, int32_t *const accumulator_buffer)
+{
+ struct KernelArgs
+ {
+ KernelArgs(
+ const int8_t *const A,
+ const int8_t *const B,
+ float *const C, const int ldc,
+ const int M, const int N, const int K,
+ const int32_t *const bias, const float *const late_bias, const Activation act,
+ bool accumulate,
+ int32_t *const accumulator_buffer
+ ) : A(A),
+ B(B), kstride_bytes(roundup(K, 4) * sizeof(int8_t)),
+ C(C), ldcb(ldc * sizeof(float)),
+ M(M), N(N), K(K),
+ min(-std::numeric_limits<float>::infinity()),
+ max(std::numeric_limits<float>::infinity()),
+ bias(bias), late_bias(late_bias),
+ accumulator_buffer(accumulator_buffer),
+ flags(0x0)
+ {
+ if (accumulate)
+ {
+ flags |= 1 << 0; // FILL_ACCUMULATORS_FROM_BUFFER
+ }
+ if (C == nullptr)
+ {
+ flags |= 1 << 1; // STORE_ACCUMULATORS_TO_BUFFER
+ }
+
+ // Initialise the activation values
+ switch (act.type)
+ {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ this->max = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ this->min = static_cast<float>(0);
+ break;
+ }
+ }
+
+ const int8_t *const A;
+ const int8_t *const B;
+ const long kstride_bytes;
+ float *const C;
+ const long ldcb;
+ const long M, N, K;
+ float min = -std::numeric_limits<float>::infinity();
+ float max = std::numeric_limits<float>::infinity();
+
+ const int32_t *const bias;
+ const float *const late_bias;
+
+ int32_t *const accumulator_buffer;
+ uint64_t flags;
+ };
+
+ // Construct arguments for this kernel
+ KernelArgs args(A, B, C, ldc, M, N, K, bias, late_bias, act, accumulate, accumulator_buffer);
+
+ __asm__ __volatile__(
+ "ldr x16, [%x[args], %[offsetof_flags]]\n"
+ ".inst 0xd503477f // SMSTART ZA\n"
+ "ptrue p1.b\n"
+ ".inst 0x25207810 // ptrue pn8.b\n"
+ "ldr x15, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "ldr x14, [%x[args], %[offsetof_accumulator_buffer]]\n"
+ "tbz x16, #0, 2f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "1:" // Initial accumulator load from buffer: Loop
+ ".inst 0xa040c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f0 // ld1w { z16.s-z19.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840680 // mova za0h.s[x12], { z20.s-z23.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840781 // mova za1h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xc0840502 // mova za2h.s[x12], { z8.s-z11.s }\n"
+ ".inst 0xc0840603 // mova za3h.s[x12], { z16.s-z19.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 1b\n"
+ "2:" // Initial accumulator load from buffer: End
+ "ldr w13, [%x[args], %[offsetof_M]]\n"
+ "mov x11, #0x0\n"
+ "mov x10, #0x0\n"
+ "ldr w9, [%x[args], %[offsetof_N]]\n"
+ "ldr x28, [%x[args], %[offsetof_A]]\n"
+ "3:" // M and N loop
+ "mov x27, x28\n"
+ "whilelt p0.s, x10, x9\n"
+ "tbnz x16, #0, 4f\n"
+ "ldr x20, [%x[args], %[offsetof_bias]]\n"
+ ".inst 0xc00800ff // zero { zad0, zad1, zad2, zad3, zad4, zad5, zad6, zad7 }\n"
+ "cbz x20, 5f\n"
+ "ld1w { z23.s }, p0/Z, [x20, x10, LSL #2]\n"
+ ".inst 0xc09026e0 // addha za0.s, p1/M, p1/M, z23.s\n"
+ ".inst 0xc09026e1 // addha za1.s, p1/M, p1/M, z23.s\n"
+ ".inst 0xc09026e2 // addha za2.s, p1/M, p1/M, z23.s\n"
+ ".inst 0xc09026e3 // addha za3.s, p1/M, p1/M, z23.s\n"
+ "4:" // Prepare accumulators: Test for last block
+ "mov x20, x10\n"
+ "mov x21, x11\n"
+ "incw x20\n"
+ "incw x21, ALL, MUL #4\n"
+ "cmp x20, x9\n"
+ "mov x20, x16\n"
+ "csel x21, x11, x21, LT\n"
+ "bfm x16, XZR, #0x0, #0x0 // bfc x16, #0x0, #0x1\n"
+ "cmp x21, x13\n"
+ "csel x16, x20, x16, LT\n"
+ "5:" // Prepare accumulators: End
+ "ldr x20, [%x[args], %[offsetof_K]]\n"
+ "ldr x23, [%x[args], %[offsetof_B]]\n"
+ "ldr x22, [%x[args], %[offsetof_kstride_bytes]]\n"
+ "add x20, x20, #0x3\n"
+ "lsr x20, x20, #0x2\n"
+ "lsr x21, x20, #0x2\n"
+ "madd x23, x10, x22, x23\n" // bptr = B + n * kstride_bytes
+ "and x20, x20, #0x3\n"
+ "cbz x21, 8f\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0408378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ "ld1b { z4.b }, p1/Z, [x23]\n"
+ ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ "ld1b { z2.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa042836c // ld1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z11.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa0438370 // ld1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ "ld1b { z28.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "ble 7f\n"
+ "6:" // K loop
+ ".inst 0xa0842700 // smopa za0.s, p1/M, p1/M, z24.b, z4.b\n"
+ "subs x21, x21, #0x1\n"
+ ".inst 0xa0842721 // smopa za1.s, p1/M, p1/M, z25.b, z4.b\n"
+ ".inst 0xa0842742 // smopa za2.s, p1/M, p1/M, z26.b, z4.b\n"
+ ".inst 0xa0842763 // smopa za3.s, p1/M, p1/M, z27.b, z4.b\n"
+ ".inst 0xa0408378 // ld1b { z24.b-z27.b }, pn8.b/Z, [x27]\n"
+ ".inst 0xa0822680 // smopa za0.s, p1/M, p1/M, z20.b, z2.b\n"
+ "ld1b { z4.b }, p1/Z, [x23]\n"
+ ".inst 0xa08226a1 // smopa za1.s, p1/M, p1/M, z21.b, z2.b\n"
+ ".inst 0xa08226c2 // smopa za2.s, p1/M, p1/M, z22.b, z2.b\n"
+ ".inst 0xa08226e3 // smopa za3.s, p1/M, p1/M, z23.b, z2.b\n"
+ ".inst 0xa0418374 // ld1b { z20.b-z23.b }, pn8.b/Z, [x27, #0x4, MUL VL]\n"
+ ".inst 0xa08b2580 // smopa za0.s, p1/M, p1/M, z12.b, z11.b\n"
+ "ld1b { z2.b }, p1/Z, [x23, #1, MUL VL]\n"
+ ".inst 0xa08b25a1 // smopa za1.s, p1/M, p1/M, z13.b, z11.b\n"
+ ".inst 0xa08b25c2 // smopa za2.s, p1/M, p1/M, z14.b, z11.b\n"
+ ".inst 0xa08b25e3 // smopa za3.s, p1/M, p1/M, z15.b, z11.b\n"
+ ".inst 0xa042836c // ld1b { z12.b-z15.b }, pn8.b/Z, [x27, #0x8, MUL VL]\n"
+ "ld1b { z11.b }, p1/Z, [x23, #2, MUL VL]\n"
+ ".inst 0xa09c2600 // smopa za0.s, p1/M, p1/M, z16.b, z28.b\n"
+ ".inst 0xa09c2621 // smopa za1.s, p1/M, p1/M, z17.b, z28.b\n"
+ ".inst 0xa09c2642 // smopa za2.s, p1/M, p1/M, z18.b, z28.b\n"
+ ".inst 0xa09c2663 // smopa za3.s, p1/M, p1/M, z19.b, z28.b\n"
+ ".inst 0xa0438370 // ld1b { z16.b-z19.b }, pn8.b/Z, [x27, #0xc, MUL VL]\n"
+ "addvl x27, x27, #16\n"
+ "ld1b { z28.b }, p1/Z, [x23, #3, MUL VL]\n"
+ "addvl x23, x23, #4\n"
+ "bgt 6b\n"
+ "7:" // K loop tail
+ ".inst 0xa0842700 // smopa za0.s, p1/M, p1/M, z24.b, z4.b\n"
+ ".inst 0xa0842721 // smopa za1.s, p1/M, p1/M, z25.b, z4.b\n"
+ ".inst 0xa0842742 // smopa za2.s, p1/M, p1/M, z26.b, z4.b\n"
+ ".inst 0xa0842763 // smopa za3.s, p1/M, p1/M, z27.b, z4.b\n"
+ ".inst 0xa0822680 // smopa za0.s, p1/M, p1/M, z20.b, z2.b\n"
+ ".inst 0xa08226a1 // smopa za1.s, p1/M, p1/M, z21.b, z2.b\n"
+ ".inst 0xa08226c2 // smopa za2.s, p1/M, p1/M, z22.b, z2.b\n"
+ ".inst 0xa08226e3 // smopa za3.s, p1/M, p1/M, z23.b, z2.b\n"
+ ".inst 0xa08b2580 // smopa za0.s, p1/M, p1/M, z12.b, z11.b\n"
+ ".inst 0xa08b25a1 // smopa za1.s, p1/M, p1/M, z13.b, z11.b\n"
+ ".inst 0xa08b25c2 // smopa za2.s, p1/M, p1/M, z14.b, z11.b\n"
+ ".inst 0xa08b25e3 // smopa za3.s, p1/M, p1/M, z15.b, z11.b\n"
+ ".inst 0xa09c2600 // smopa za0.s, p1/M, p1/M, z16.b, z28.b\n"
+ ".inst 0xa09c2621 // smopa za1.s, p1/M, p1/M, z17.b, z28.b\n"
+ ".inst 0xa09c2642 // smopa za2.s, p1/M, p1/M, z18.b, z28.b\n"
+ ".inst 0xa09c2663 // smopa za3.s, p1/M, p1/M, z19.b, z28.b\n"
+ "8:" // K oddments
+ "cbz x20, 10f\n"
+ "9:" // K oddments: Loop
+ ".inst 0xa1408373 // ld1b { z19.b, z23.b, z27.b, z31.b }, pn8.b/Z, [x27]\n"
+ "subs x20, x20, #0x1\n"
+ "addvl x27, x27, #4\n"
+ "ld1b { z16.b }, p1/Z, [x23]\n"
+ "addvl x23, x23, #1\n"
+ ".inst 0xa0902660 // smopa za0.s, p1/M, p1/M, z19.b, z16.b\n"
+ ".inst 0xa09026e1 // smopa za1.s, p1/M, p1/M, z23.b, z16.b\n"
+ ".inst 0xa0902762 // smopa za2.s, p1/M, p1/M, z27.b, z16.b\n"
+ ".inst 0xa09027e3 // smopa za3.s, p1/M, p1/M, z31.b, z16.b\n"
+ "bgt 9b\n"
+ "10:" // K oddments: End
+ "tbz x16, #1, 14f\n"
+ "tbz x16, #0, 12f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "11:" // Store to partial result buffer: Store and refill: Loop
+ ".inst 0xa040c1e8 // ld1w { z8.s-z11.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ ".inst 0xc0860424 // mova { z4.s-z7.s }, za1h.s[x12]\n"
+ ".inst 0xa041c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xc0860458 // mova { z24.s-z27.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa042c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1f4 // ld1w { z20.s-z23.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840500 // mova za0h.s[x12], { z8.s-z11.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840581 // mova za1h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xa060c1c0 // st1w { z0.s-z3.s }, pn8.b, [x14]\n"
+ ".inst 0xc0840782 // mova za2h.s[x12], { z28.s-z31.s }\n"
+ ".inst 0xa061c1c4 // st1w { z4.s-z7.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ ".inst 0xc0840683 // mova za3h.s[x12], { z20.s-z23.s }\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa062c1d8 // st1w { z24.s-z27.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa063c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 11b\n"
+ "b 30f\n"
+ "12:" // Store to partial result buffer: Store only
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "13:" // Store to partial result buffer: Store only: Loop
+ ".inst 0xc0860408 // mova { z8.s-z11.s }, za0h.s[x12]\n"
+ ".inst 0xc086042c // mova { z12.s-z15.s }, za1h.s[x12]\n"
+ ".inst 0xc0860454 // mova { z20.s-z23.s }, za2h.s[x12]\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ ".inst 0xa060c1c8 // st1w { z8.s-z11.s }, pn8.b, [x14]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xa061c1cc // st1w { z12.s-z15.s }, pn8.b, [x14, #0x4, MUL VL]\n"
+ "cmp x12, x20\n"
+ ".inst 0xa062c1d4 // st1w { z20.s-z23.s }, pn8.b, [x14, #0x8, MUL VL]\n"
+ ".inst 0xa063c1d0 // st1w { z16.s-z19.s }, pn8.b, [x14, #0xc, MUL VL]\n"
+ "addvl x14, x14, #16\n"
+ "blt 13b\n"
+ "b 30f\n"
+ "14:" // Store to output array
+ "ldr x26, [%x[args], %[offsetof_C]]\n"
+ "sub x25, x13, x11\n"
+ "ld1rw { z23.s }, p1/Z, [%x[dq], %[offset_DequantizeFloat_scale]]\n"
+ "fmov z22.s, #0x0\n"
+ "ldr x24, [%x[args], %[offsetof_ldcb]]\n"
+ "ldr x20, [%x[args], %[offsetof_late_bias]]\n"
+ "add x26, x26, x10, LSL #2\n" // C += n
+ "madd x26, x11, x24, x26\n" // C += m * ldc
+ "cbz x20, 15f\n"
+ "add x20, x20, x10, LSL #2\n"
+ "ld1w { z22.s }, p0/Z, [x20]\n"
+ "15:" // Store to output array: no late bias
+ "cntw x23\n"
+ "ld1rw { z21.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_min]]\n"
+ "mov x12, #0x0\n"
+ "cmp x25, x23\n"
+ "ld1rw { z20.s }, p1/Z, [%x[args], %[offsetof_KernelArgs_max]]\n"
+ "csel x22, x25, x23, LT\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 17f\n"
+ "16:" // Store to output array: Accumulator row 0 loop
+ ".inst 0xc0860400 // mova { z0.s-z3.s }, za0h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc132e000 // scvtf { z0.s-z3.s }, { z0.s-z3.s }\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z0.s, p1/M, z23.s, z22.s\n"
+ "fmad z1.s, p1/M, z23.s, z22.s\n"
+ "fmad z2.s, p1/M, z23.s, z22.s\n"
+ "fmad z3.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4caa0 // fclamp { z0.s-z3.s }, z21.s, z20.s\n"
+ "st1w { z0.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z1.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z2.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z3.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 16b\n"
+ "17:" // Store to output array: Accumulator row 0 oddments
+ "cbz x20, 18f\n"
+ ".inst 0xc0860410 // mova { z16.s-z19.s }, za0h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc132e210 // scvtf { z16.s-z19.s }, { z16.s-z19.s }\n"
+ "fmad z16.s, p1/M, z23.s, z22.s\n"
+ "fmad z17.s, p1/M, z23.s, z22.s\n"
+ "fmad z18.s, p1/M, z23.s, z22.s\n"
+ "fmad z19.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 18f\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z17.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 18f\n"
+ "st1w { z18.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "18:" // Store to output array: Accumulator row 0 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 28f\n"
+ "cmp x25, x23\n"
+ "mov x12, #0x0\n"
+ "csel x22, x25, x23, LT\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 20f\n"
+ "19:" // Store to output array: Accumulator row 1 loop
+ ".inst 0xc0860430 // mova { z16.s-z19.s }, za1h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc132e210 // scvtf { z16.s-z19.s }, { z16.s-z19.s }\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z16.s, p1/M, z23.s, z22.s\n"
+ "fmad z17.s, p1/M, z23.s, z22.s\n"
+ "fmad z18.s, p1/M, z23.s, z22.s\n"
+ "fmad z19.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z17.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z18.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z19.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 19b\n"
+ "20:" // Store to output array: Accumulator row 1 oddments
+ "cbz x20, 21f\n"
+ ".inst 0xc086043c // mova { z28.s-z31.s }, za1h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc132e39c // scvtf { z28.s-z31.s }, { z28.s-z31.s }\n"
+ "fmad z28.s, p1/M, z23.s, z22.s\n"
+ "fmad z29.s, p1/M, z23.s, z22.s\n"
+ "fmad z30.s, p1/M, z23.s, z22.s\n"
+ "fmad z31.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cabc // fclamp { z28.s-z31.s }, z21.s, z20.s\n"
+ "st1w { z28.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 21f\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z29.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 21f\n"
+ "st1w { z30.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "21:" // Store to output array: Accumulator row 1 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 28f\n"
+ "cmp x25, x23\n"
+ "mov x12, #0x0\n"
+ "csel x22, x25, x23, LT\n"
+ "lsr x21, x22, #0x2\n"
+ "and x20, x22, #0x3\n"
+ "cbz x21, 23f\n"
+ "22:" // Store to output array: Accumulator row 2 loop
+ ".inst 0xc086044c // mova { z12.s-z15.s }, za2h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc132e18c // scvtf { z12.s-z15.s }, { z12.s-z15.s }\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z12.s, p1/M, z23.s, z22.s\n"
+ "fmad z13.s, p1/M, z23.s, z22.s\n"
+ "fmad z14.s, p1/M, z23.s, z22.s\n"
+ "fmad z15.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4caac // fclamp { z12.s-z15.s }, z21.s, z20.s\n"
+ "st1w { z12.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z13.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z14.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z15.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 22b\n"
+ "23:" // Store to output array: Accumulator row 2 oddments
+ "cbz x20, 24f\n"
+ ".inst 0xc0860450 // mova { z16.s-z19.s }, za2h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc132e210 // scvtf { z16.s-z19.s }, { z16.s-z19.s }\n"
+ "fmad z16.s, p1/M, z23.s, z22.s\n"
+ "fmad z17.s, p1/M, z23.s, z22.s\n"
+ "fmad z18.s, p1/M, z23.s, z22.s\n"
+ "fmad z19.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 24f\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z17.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 24f\n"
+ "st1w { z18.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "24:" // Store to output array: Accumulator row 2 oddments: End
+ "subs x25, x25, x22\n"
+ "beq 28f\n"
+ "cmp x25, x23\n"
+ "mov x12, #0x0\n"
+ "csel x20, x25, x23, LT\n"
+ "lsr x21, x20, #0x2\n"
+ "and x20, x20, #0x3\n"
+ "cbz x21, 26f\n"
+ "25:" // Store to output array: Accumulator row 3 loop
+ ".inst 0xc0860478 // mova { z24.s-z27.s }, za3h.s[x12]\n"
+ "add x12, x12, #0x4\n"
+ ".inst 0xc132e318 // scvtf { z24.s-z27.s }, { z24.s-z27.s }\n"
+ "cmp x12, x21, LSL #2\n"
+ "fmad z24.s, p1/M, z23.s, z22.s\n"
+ "fmad z25.s, p1/M, z23.s, z22.s\n"
+ "fmad z26.s, p1/M, z23.s, z22.s\n"
+ "fmad z27.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cab8 // fclamp { z24.s-z27.s }, z21.s, z20.s\n"
+ "st1w { z24.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z25.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z26.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "st1w { z27.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "blt 25b\n"
+ "26:" // Store to output array: Accumulator row 3 oddments
+ "cbz x20, 27f\n"
+ ".inst 0xc0860470 // mova { z16.s-z19.s }, za3h.s[x12]\n"
+ "subs x20, x20, #0x1\n"
+ ".inst 0xc132e210 // scvtf { z16.s-z19.s }, { z16.s-z19.s }\n"
+ "fmad z16.s, p1/M, z23.s, z22.s\n"
+ "fmad z17.s, p1/M, z23.s, z22.s\n"
+ "fmad z18.s, p1/M, z23.s, z22.s\n"
+ "fmad z19.s, p1/M, z23.s, z22.s\n"
+ ".inst 0xc1b4cab0 // fclamp { z16.s-z19.s }, z21.s, z20.s\n"
+ "st1w { z16.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 27f\n"
+ "subs x20, x20, #0x1\n"
+ "st1w { z17.s }, p0, [x26]\n"
+ "add x26, x26, x24\n"
+ "beq 27f\n"
+ "st1w { z18.s }, p0, [x26]\n"
+ "27:" // Store to output array: Accumulator row 3 oddments: End
+ "28:" // Store to output array: End
+ "tbz x16, #0, 30f\n"
+ "mov x12, #0x0\n"
+ "cntw x20\n"
+ "29:" // Store to output array: Refill accumulators: Loop
+ ".inst 0xa040c1fc // ld1w { z28.s-z31.s }, pn8.b/Z, [x15]\n"
+ ".inst 0xa041c1e0 // ld1w { z0.s-z3.s }, pn8.b/Z, [x15, #0x4, MUL VL]\n"
+ ".inst 0xa042c1ec // ld1w { z12.s-z15.s }, pn8.b/Z, [x15, #0x8, MUL VL]\n"
+ ".inst 0xa043c1e4 // ld1w { z4.s-z7.s }, pn8.b/Z, [x15, #0xc, MUL VL]\n"
+ ".inst 0xc0840780 // mova za0h.s[x12], { z28.s-z31.s }\n"
+ "addvl x15, x15, #16\n"
+ ".inst 0xc0840401 // mova za1h.s[x12], { z0.s-z3.s }\n"
+ ".inst 0xc0840582 // mova za2h.s[x12], { z12.s-z15.s }\n"
+ ".inst 0xc0840483 // mova za3h.s[x12], { z4.s-z7.s }\n"
+ "add x12, x12, #0x4\n"
+ "cmp x12, x20\n"
+ "blt 29b\n"
+ "30:" // End block
+ "incw x10\n"
+ "cmp x10, x9\n"
+ "blt 3b\n"
+ "incw x11, ALL, MUL #4\n"
+ "mov x10, #0x0\n"
+ "cmp x11, x13\n"
+ "mov x28, x27\n"
+ "blt 3b\n"
+ ".inst 0xd503467f // SMSTOP\n"
+ :
+ : [args] "r" (&args), [dq] "r" (&dq), [offset_DequantizeFloat_scale] "I" (offsetof(DequantizeFloat, scale)), [offsetof_A] "I" (offsetof(KernelArgs, A)), [offsetof_B] "I" (offsetof(KernelArgs, B)), [offsetof_C] "I" (offsetof(KernelArgs, C)), [offsetof_K] "I" (offsetof(KernelArgs, K)), [offsetof_KernelArgs_max] "I" (offsetof(KernelArgs, max)), [offsetof_KernelArgs_min] "I" (offsetof(KernelArgs, min)), [offsetof_M] "I" (offsetof(KernelArgs, M)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_accumulator_buffer] "I" (offsetof(KernelArgs, accumulator_buffer)), [offsetof_bias] "I" (offsetof(KernelArgs, bias)), [offsetof_flags] "I" (offsetof(KernelArgs, flags)), [offsetof_kstride_bytes] "I" (offsetof(KernelArgs, kstride_bytes)), [offsetof_late_bias] "I" (offsetof(KernelArgs, late_bias)), [offsetof_ldcb] "I" (offsetof(KernelArgs, ldcb))
+ : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+}
+
+} // namespace arm_gemm
+
+#endif // ARM_COMPUTE_ENABLE_SME2
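
The tbz tests on the flags register in the kernels above are driven by the two bits initialised in the KernelArgs constructor. A compact sketch of the intended meaning is given below; the enumerator names come from the comments in the constructor, while the helper itself is illustrative only.

#include <cstdint>

enum : uint64_t
{
    FILL_ACCUMULATORS_FROM_BUFFER = 1u << 0, // set when accumulate == true
    STORE_ACCUMULATORS_TO_BUFFER  = 1u << 1, // set when C == nullptr
};

// Illustrative dispatch mirroring the assembly: reload ZA from the accumulator
// buffer when bit 0 is set, spill ZA back to the buffer when bit 1 is set, and
// otherwise dequantize, apply the late bias/activation and store to C.
static void epilogue_dispatch(uint64_t flags, bool &load_za, bool &spill_za, bool &store_to_c)
{
    load_za    = (flags & FILL_ACCUMULATORS_FROM_BUFFER) != 0;
    spill_za   = (flags & STORE_ACCUMULATORS_TO_BUFFER) != 0;
    store_to_c = !spill_za;
}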
diff --git a/src/core/NEON/kernels/arm_gemm/quantized.cpp b/src/core/NEON/kernels/arm_gemm/quantized.cpp
index 111d01ed3a..6da9f4be0e 100644
--- a/src/core/NEON/kernels/arm_gemm/quantized.cpp
+++ b/src/core/NEON/kernels/arm_gemm/quantized.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019 Arm Limited.
+ * Copyright (c) 2019, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -1142,6 +1142,64 @@ void compute_col_sums(const Requantize32 &qp, unsigned int width, unsigned int h
template void compute_col_sums(const Requantize32 &qp, unsigned int width, unsigned int height, const int8_t *input, unsigned int in_stride, int32_t *col_bias, unsigned int depth, unsigned int multi, unsigned int first_col);
template void compute_col_sums(const Requantize32 &qp, unsigned int width, unsigned int height, const uint8_t *input, unsigned int in_stride, int32_t *col_bias, unsigned int depth, unsigned int multi, unsigned int first_col);
+void dequantize_block_32(const DequantizeFloat &qp, unsigned int width, unsigned int height,
+ const int32_t* in_ptr, unsigned int in_stride, float *out_ptr, unsigned int out_stride,
+ const float* bias_ptr, bool accumulate, const Activation &act)
+{
+ const float32x4_t vscale = vdupq_n_f32(qp.scale);
+ float maxval = std::numeric_limits<float>::infinity();
+ float minval = -std::numeric_limits<float>::infinity();
+
+ switch(act.type) {
+ default:
+ case Activation::Type::None:
+ break;
+ case Activation::Type::BoundedReLU:
+ maxval = static_cast<float>(act.param1);
+ /* fall through */
+ case Activation::Type::ReLU:
+ minval = 0;
+ break;
+ }
+
+ const float32x4_t vmin = vdupq_n_f32(minval);
+ const float32x4_t vmax = vdupq_n_f32(maxval);
+
+ for(unsigned int row=0; row<height; row++) {
+ auto row_in_ptr = in_ptr + (row * in_stride);
+ auto row_out_ptr = out_ptr + (row * out_stride);
+ unsigned int col=0;
+ if (width >= 4) {
+ for(; col <= (width - 4); col+= 4) {
+ const int32x4_t vin = vld1q_s32(row_in_ptr + col);
+ float32x4_t vdeq = vmulq_f32(vcvtq_f32_s32(vin), vscale);
+ if(bias_ptr) {
+ const float32x4_t bin = vld1q_f32(bias_ptr + col);
+ vdeq = vaddq_f32(vdeq, bin);
+ }
+ if(accumulate) {
+ vdeq = vaddq_f32(vdeq, vld1q_f32(row_out_ptr + col));
+ }
+ vdeq = vminq_f32(vmaxq_f32(vdeq, vmin), vmax);
+ vst1q_f32(reinterpret_cast<float *>(row_out_ptr + col), vdeq);
+ }
+ }
+ // left-over elements
+ for(; col < width; ++col) {
+ const int32_t val = *(row_in_ptr + col);
+ float res = static_cast<float>(val * qp.scale);
+ if(bias_ptr) {
+ res += static_cast<float>(*(bias_ptr + col));
+ }
+ if(accumulate) {
+ res += *(row_out_ptr + col);
+ }
+ res = std::min(std::max(res, minval), maxval);
+ *(row_out_ptr + col) = res;
+ }
+ }
+}
+
} // namespace arm_gemm
#endif // __aarch64__
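
A minimal usage sketch for the new dequantize_block_32 helper, assuming a 2x3 block of s32 GEMM results and a per-column bias. The values, and the way DequantizeFloat and Activation are initialised, are illustrative only.

#include <cstdint>
// dequantize_block_32 and the DequantizeFloat / Activation types come from
// arm_gemm (see quantized.hpp and arm_gemm.hpp).

void example_dequantize_block()
{
    const int32_t acc[2 * 3] = {10, 20, 30, 40, 50, 60}; // raw s32 GEMM output
    const float   bias[3]    = {0.5f, 0.0f, -0.5f};      // per-column late bias
    float         out[2 * 3] = {};

    arm_gemm::DequantizeFloat dq{};
    dq.scale = 0.25f;                  // typically a_scale * b_scale
    arm_gemm::Activation act{};        // no activation, so infinite bounds

    // width = 3, height = 2, strides are given in elements, accumulate = false.
    arm_gemm::dequantize_block_32(dq, 3, 2, acc, 3, out, 3, bias, false, act);
}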
diff --git a/src/core/NEON/kernels/arm_gemm/quantized.hpp b/src/core/NEON/kernels/arm_gemm/quantized.hpp
index 31dd65b397..bc64fd967b 100644
--- a/src/core/NEON/kernels/arm_gemm/quantized.hpp
+++ b/src/core/NEON/kernels/arm_gemm/quantized.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, 2023 Arm Limited.
+ * Copyright (c) 2019, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -45,4 +45,8 @@ template<typename T>
void row_sums_indirect(size_t num_strings, const unsigned int *string_lengths, IndirectInputArg<T> A_arg,
size_t M, int32_t *output_ptr, const Requantize32 *qp);
+void dequantize_block_32(const DequantizeFloat &qp, unsigned int width, unsigned int height,
+ const int32_t* input, unsigned int in_stride, float *output, unsigned int out_stride,
+ const float *row_bias, bool not_first_pass, const Activation &act);
+
} // namespace arm_gemm
diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp
index e290783021..2a76a5958d 100644
--- a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp
+++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2022,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -51,17 +51,19 @@ Status validate_arguments(const ITensorInfo *mm_result,
int32_t a_offset,
int32_t b_offset)
{
- ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32, DataType::F32);
- // If a_offset == 0, vector_sum_col can be a nullptr
- if (a_offset != 0)
+    // Validate vector_sum_col if the offset is non-zero or the tensor has been provided;
+    // the latter is needed because the offsets can change when the QuantizationInfo is dynamic
+ if (a_offset != 0 || vector_sum_col != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
}
- // If b_offset == 0, vector_sum_row can be a nullptr
- if (b_offset != 0)
+    // Validate vector_sum_row if the offset is non-zero or the tensor has been provided;
+    // the latter is needed because the offsets can change when the QuantizationInfo is dynamic
+ if (b_offset != 0 || vector_sum_row != nullptr)
{
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);
@@ -86,7 +88,7 @@ Status validate_arguments(const ITensorInfo *mm_result,
ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
"mm_result tensor must have the same number of batches of output tensor");
- if (a_offset != 0)
+ if (vector_sum_col != nullptr)
{
TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
vector_sum_col_shape.collapse_from(1);
@@ -102,6 +104,275 @@ Status validate_arguments(const ITensorInfo *mm_result,
return Status{};
}
+void run_offset_contribution_float(const Window &window,
+ ITensor *mm_result,
+ const ITensor *vector_sum_col,
+ const ITensor *vector_sum_row,
+ int32_t a_offset,
+ int32_t b_offset,
+ int32_t k_offset,
+ float scale,
+ bool slide_vector_sum_col,
+ bool is_gemm3d)
+{
+ Window collapsed_window = window.collapse_if_possible(window, Window::DimZ);
+ collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+ const int height_input = is_gemm3d ? mm_result->info()->dimension(1) : 0;
+ const int depth_input = is_gemm3d ? mm_result->info()->dimension(2) : 1;
+
+ const int window_start_x = window.x().start();
+ const int window_end_x = window.x().end();
+ const int window_step_x = 16;
+
+ // if vector_sum_col is nullptr then stride_y is 0, else get stride_y
+ const size_t sum_col_stride_y = (vector_sum_col != nullptr) ? (vector_sum_col->info()->strides_in_bytes().y()) : 0;
+ Iterator mm_result_it(mm_result, collapsed_window);
+
+ if ((a_offset != 0) && (b_offset != 0) && (vector_sum_col != nullptr) && (vector_sum_row != nullptr)) // true, true
+ {
+ // Set window for vector_sum_col
+ Window win_vector_sum_col(collapsed_window);
+ win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ // Set window for vector_sum_row
+ Window win_vector_sum_row(collapsed_window);
+ win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
+ Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
+
+ const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+ // Offset in case vector_sum_col is batched
+ const int vector_sum_col_batch_offset =
+ slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+ execute_window_loop(
+ collapsed_window,
+ [&](const Coordinates &id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const size_t batch_offset_col = batch_id * (sum_col_stride_y);
+ auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_offset_col +
+ batch_id * vector_sum_col_batch_offset);
+ auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr());
+
+ // Compute the leftover term due to b_offset.
+ int32_t b_offset_term_s32 =
+ *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) +
+ id.y() + (id.z() % depth_input) * height_input);
+ b_offset_term_s32 *= b_offset;
+
+ const int32x4_t b_offset_term_s32_vec = vdupq_n_s32(b_offset_term_s32);
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Compute the leftover term due to a_offset.
+ int32x4x4_t a_offset_term_s32 = {
+ {vld1q_s32(vector_sum_col_ptr + x + 0), vld1q_s32(vector_sum_col_ptr + x + 4),
+ vld1q_s32(vector_sum_col_ptr + x + 8), vld1q_s32(vector_sum_col_ptr + x + 12)}};
+
+ a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
+ a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
+ a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
+ a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
+
+ // Add a_offset_term_s32 and b_offset_term_s32
+ int32x4x4_t offset_term_s32 = {
+ {vdupq_n_s32(k_offset), vdupq_n_s32(k_offset), vdupq_n_s32(k_offset), vdupq_n_s32(k_offset)}};
+
+ offset_term_s32.val[0] =
+ vaddq_s32(offset_term_s32.val[0], vaddq_s32(a_offset_term_s32.val[0], b_offset_term_s32_vec));
+ offset_term_s32.val[1] =
+ vaddq_s32(offset_term_s32.val[1], vaddq_s32(a_offset_term_s32.val[1], b_offset_term_s32_vec));
+ offset_term_s32.val[2] =
+ vaddq_s32(offset_term_s32.val[2], vaddq_s32(a_offset_term_s32.val[2], b_offset_term_s32_vec));
+ offset_term_s32.val[3] =
+ vaddq_s32(offset_term_s32.val[3], vaddq_s32(a_offset_term_s32.val[3], b_offset_term_s32_vec));
+
+ float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4),
+ vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}};
+
+ // Convert and scale the S32 offsets to match the already scaled GEMM results
+ float32x4x4_t offset_terms_scaled = {{
+ vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[0]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[1]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[2]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(offset_term_s32.val[3]), scale),
+ }};
+
+ // Add the offset terms to the GEMM result
+ in_f32.val[0] = vaddq_f32(in_f32.val[0], offset_terms_scaled.val[0]);
+ in_f32.val[1] = vaddq_f32(in_f32.val[1], offset_terms_scaled.val[1]);
+ in_f32.val[2] = vaddq_f32(in_f32.val[2], offset_terms_scaled.val[2]);
+ in_f32.val[3] = vaddq_f32(in_f32.val[3], offset_terms_scaled.val[3]);
+
+ // Store the result with the offset contribution
+ vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]);
+ vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]);
+ vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]);
+ vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]);
+ }
+
+ // Left-overs loop
+ for (; x < window_end_x; ++x)
+ {
+ // Compute the leftover term due to a_offset.
+ int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);
+
+ a_offset_term_s32 *= a_offset;
+
+ // Add the offset terms to GEMM's result
+ // Store the result with the offset contribution
+ mm_result_ptr[x] += (k_offset + a_offset_term_s32 + b_offset_term_s32) * scale;
+ }
+ },
+ vector_sum_col_it, vector_sum_row_it, mm_result_it);
+ }
+ else if ((a_offset == 0) && (b_offset != 0) && (vector_sum_row != nullptr)) // false, true
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(vector_sum_row);
+
+ // Set window for vector_sum_row
+ Window win_vector_sum_row(collapsed_window);
+ win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator vector_sum_row_it(vector_sum_row, win_vector_sum_row);
+
+ const size_t sum_row_stride_y = vector_sum_row->info()->strides_in_bytes().y();
+
+ execute_window_loop(
+ collapsed_window,
+ [&](const Coordinates &id)
+ {
+ const int batch_id = id.z() / depth_input;
+ auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr());
+
+ // Compute the leftover term due to b_offset.
+ int32_t row_sum =
+ *(reinterpret_cast<const int32_t *>(vector_sum_row_it.ptr() + batch_id * sum_row_stride_y) +
+ id.y() + (id.z() % depth_input) * height_input);
+ float scaled_b_offset_term_f32 = row_sum * b_offset * scale;
+
+ const float32x4_t b_offset_term_f32_vec = vdupq_n_f32(scaled_b_offset_term_f32);
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4),
+ vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}};
+
+ // Add the offset terms to GEMM's result
+ in_f32.val[0] = vaddq_f32(in_f32.val[0], b_offset_term_f32_vec);
+ in_f32.val[1] = vaddq_f32(in_f32.val[1], b_offset_term_f32_vec);
+ in_f32.val[2] = vaddq_f32(in_f32.val[2], b_offset_term_f32_vec);
+ in_f32.val[3] = vaddq_f32(in_f32.val[3], b_offset_term_f32_vec);
+
+ // Store the result with the offset contribution
+ vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]);
+ vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]);
+ vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]);
+ vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]);
+ }
+
+ // Left-overs loop
+ for (; x < window_end_x; ++x)
+ {
+ // Add the offset terms to GEMM's result
+ // Store the result with the offset contribution
+ mm_result_ptr[x] += scaled_b_offset_term_f32;
+ }
+ },
+ vector_sum_row_it, mm_result_it);
+ }
+ else if ((a_offset != 0) && (b_offset == 0) && (vector_sum_col != nullptr)) // true, false
+ {
+ // Set window for vector_sum_col
+ Window win_vector_sum_col(collapsed_window);
+ win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
+ win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));
+
+ Iterator vector_sum_col_it(vector_sum_col, win_vector_sum_col);
+
+ // Offset in case vector_sum_col is batched
+ const int vector_sum_col_batch_offset =
+ slide_vector_sum_col ? vector_sum_col->info()->strides_in_bytes().z() : 0;
+
+ execute_window_loop(
+ collapsed_window,
+ [&](const Coordinates &id)
+ {
+ const int batch_id = id.z() / depth_input;
+ const size_t batch_offset_col =
+ batch_id *
+ (sum_col_stride_y); // Value to offset vector_sum_col_ptr to allow for iteration of y values in tensor
+ auto vector_sum_col_ptr = reinterpret_cast<const int32_t *>(vector_sum_col_it.ptr() + batch_offset_col +
+ batch_id * vector_sum_col_batch_offset);
+ auto mm_result_ptr = reinterpret_cast<float *>(mm_result_it.ptr());
+
+ int x = window_start_x;
+ for (; x <= (window_end_x - window_step_x); x += window_step_x)
+ {
+ // Compute the leftover term due to a_offset.
+ int32x4x4_t a_offset_term_s32 = {
+ {vld1q_s32(vector_sum_col_ptr + x + 0), vld1q_s32(vector_sum_col_ptr + x + 4),
+ vld1q_s32(vector_sum_col_ptr + x + 8), vld1q_s32(vector_sum_col_ptr + x + 12)}};
+
+ a_offset_term_s32.val[0] = vmulq_n_s32(a_offset_term_s32.val[0], a_offset);
+ a_offset_term_s32.val[1] = vmulq_n_s32(a_offset_term_s32.val[1], a_offset);
+ a_offset_term_s32.val[2] = vmulq_n_s32(a_offset_term_s32.val[2], a_offset);
+ a_offset_term_s32.val[3] = vmulq_n_s32(a_offset_term_s32.val[3], a_offset);
+
+ float32x4x4_t a_offset_term_scaled = {{
+ vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[0]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[1]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[2]), scale),
+ vmulq_n_f32(vcvtq_f32_s32(a_offset_term_s32.val[3]), scale),
+ }};
+
+ float32x4x4_t in_f32 = {{vld1q_f32(mm_result_ptr + x + 0), vld1q_f32(mm_result_ptr + x + 4),
+ vld1q_f32(mm_result_ptr + x + 8), vld1q_f32(mm_result_ptr + x + 12)}};
+
+ // Add the offset terms to GEMM's result
+ in_f32.val[0] = vaddq_f32(in_f32.val[0], a_offset_term_scaled.val[0]);
+ in_f32.val[1] = vaddq_f32(in_f32.val[1], a_offset_term_scaled.val[1]);
+ in_f32.val[2] = vaddq_f32(in_f32.val[2], a_offset_term_scaled.val[2]);
+ in_f32.val[3] = vaddq_f32(in_f32.val[3], a_offset_term_scaled.val[3]);
+
+ // Store the result with the offset contribution
+ vst1q_f32(mm_result_ptr + x + 0, in_f32.val[0]);
+ vst1q_f32(mm_result_ptr + x + 4, in_f32.val[1]);
+ vst1q_f32(mm_result_ptr + x + 8, in_f32.val[2]);
+ vst1q_f32(mm_result_ptr + x + 12, in_f32.val[3]);
+ }
+
+ // Left-overs loop
+ for (; x < window_end_x; ++x)
+ {
+ // Compute the leftover term due to a_offset.
+ const int32_t a_offset_term_s32 = *(vector_sum_col_ptr + x);
+
+ // Add the offset terms to GEMM's result
+ // Store the result with the offset contribution
+ mm_result_ptr[x] += a_offset_term_s32 * a_offset * scale;
+ }
+ },
+ vector_sum_col_it, mm_result_it);
+ }
+ else // false, false
+ {
+ // No offset contribution from matrix A and matrix B
+ return;
+ }
+}
+
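Per output element, the float offset contribution above reduces to the scalar reference below; k_offset is a_offset * b_offset * k, now recomputed in run_op() since only k is stored. The helper is a sketch for clarity, not part of the patch.

#include <cstdint>

// Scalar model of run_offset_contribution_float for the element at (x, y):
// the s32 offset terms are scaled so they match the already-dequantized F32
// GEMM result before being added to it.
static inline float offset_contribution_f32(float mm_result, int32_t col_sum_x,
                                            int32_t row_sum_y, int32_t a_offset,
                                            int32_t b_offset, int32_t k_offset,
                                            float scale)
{
    const int32_t term = k_offset + a_offset * col_sum_x + b_offset * row_sum_y;
    return mm_result + static_cast<float>(term) * scale;
}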
void run_offset_contribution(const Window &window,
ITensor *mm_result,
const ITensor *vector_sum_col,
@@ -361,7 +632,8 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result,
ITensorInfo *vector_sum_row,
int32_t k,
int32_t a_offset,
- int32_t b_offset)
+ int32_t b_offset,
+ float scale)
{
// Perform validate step
ARM_COMPUTE_UNUSED(vector_sum_row);
@@ -370,10 +642,11 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result,
_a_offset = a_offset;
_b_offset = b_offset;
- _k_offset = a_offset * b_offset * k;
+ _k = k;
- // If a_offset == 0, vector_sum_col can be a nullptr
- if (a_offset != 0)
+ _scale = scale;
+
+ if (vector_sum_col != nullptr)
{
// Check if vector_sum_col_shape should be slidden or not
// Don't slide vector_sum_col_shape along the y dimension if vector_sum_col_shape has just 1 dimension and vector_sum_row_shape more than 1
@@ -386,6 +659,21 @@ void CpuGemmLowpOffsetContributionKernel::configure(ITensorInfo *mm_result,
ICpuKernel::configure(win);
}
+void CpuGemmLowpOffsetContributionKernel::set_a_offset(int32_t a_offset)
+{
+ _a_offset = a_offset;
+}
+
+void CpuGemmLowpOffsetContributionKernel::set_b_offset(int32_t b_offset)
+{
+ _b_offset = b_offset;
+}
+
+void CpuGemmLowpOffsetContributionKernel::set_scale(float scale)
+{
+ _scale = scale;
+}
+
Status CpuGemmLowpOffsetContributionKernel::validate(const ITensorInfo *mm_result,
const ITensorInfo *vector_sum_col,
const ITensorInfo *vector_sum_row,
@@ -410,8 +698,18 @@ void CpuGemmLowpOffsetContributionKernel::run_op(ITensorPack &tensors, const Win
const bool reinterpret_as_3d = vector_sum_row != nullptr && mm_result->info()->num_dimensions() > 1 &&
mm_result->info()->tensor_shape().y() != vector_sum_row->info()->tensor_shape().x();
- run_offset_contribution(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, _k_offset,
- _slide_vector_sum_col, reinterpret_as_3d);
+    // Check the data type of the result to select the float or integer contribution path
+ auto k_offset = _a_offset * _b_offset * _k;
+ if (mm_result->info()->data_type() == DataType::F32)
+ {
+ run_offset_contribution_float(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, k_offset,
+ _scale, _slide_vector_sum_col, reinterpret_as_3d);
+ }
+ else
+ {
+ run_offset_contribution(window, mm_result, vector_sum_col, vector_sum_row, _a_offset, _b_offset, k_offset,
+ _slide_vector_sum_col, reinterpret_as_3d);
+ }
}
const char *CpuGemmLowpOffsetContributionKernel::name() const
diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h
index 08b2d47529..ecbfb0c282 100644
--- a/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h
+++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2022 Arm Limited.
+ * Copyright (c) 2017-2022,2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,12 +21,14 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H
-#define ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H
+#ifndef ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
+#define ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
#include "src/core/common/Macros.h"
#include "src/cpu/ICpuKernel.h"
+#include <cstdint>
+
namespace arm_compute
{
namespace cpu
@@ -62,13 +64,16 @@ public:
* @param[in] k Number of matrix A columns or Matrix B rows
* @param[in] a_offset Offset to be added to each element of the matrix A.
* @param[in] b_offset Offset to be added to each element of the matrix B.
+ * @param[in] scale (Optional) multiplies the contribution to make it the same scale as the dst in the case where mm_result is float
+ * (and so has already been scaled). Default is 1.0
*/
void configure(ITensorInfo *mm_result,
ITensorInfo *vector_sum_col,
ITensorInfo *vector_sum_row,
int32_t k,
int32_t a_offset,
- int32_t b_offset);
+ int32_t b_offset,
+ float scale = 1.0f);
/** Static function to check if given info will lead to a valid configuration
*
* Similar to CpuGemmLowpOffsetContributionKernel::configure()
@@ -81,6 +86,29 @@ public:
int32_t a_offset,
int32_t b_offset);
+ /** Set the a offset
+ * Warning: if a_offset is non-zero then vector_sum_col must be set in run_op.
+ * Run configure or validate again if you aren't sure
+ *
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ */
+ void set_a_offset(int32_t a_offset);
+
+ /** Set the b offset
+ * Warning: if b_offset is non-zero then vector_sum_row must be set in run_op.
+ * Run configure or validate again if you aren't sure
+ *
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ */
+ void set_b_offset(int32_t b_offset);
+
+ /** Set the dequantize scale
+ *
+ * @param[in] scale Multiplies the contribution to make it the same scale as the dst in the case where
+ * mm_result is float (and so has already been scaled).
+ */
+ void set_scale(float scale);
+
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
const char *name() const override;
@@ -88,10 +116,11 @@ public:
private:
int32_t _a_offset{0};
int32_t _b_offset{0};
- int32_t _k_offset{0};
+ int32_t _k{0}; // Number of columns of A or rows of B, used in last offset term
+ float _scale{1.0};
bool _slide_vector_sum_col{true};
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_KERNEL_H */
+#endif // ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONKERNEL_H
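
A sketch of how the new setters are intended to be used when the QuantizationInfo turns out to be dynamic and the offsets change after configure(); the surrounding operator code is simplified and the function name below is hypothetical.

#include <cstdint>
#include "src/cpu/kernels/CpuGemmLowpOffsetContributionKernel.h"

// 'kernel' is an already-configured CpuGemmLowpOffsetContributionKernel. Since
// run_op() now recomputes k_offset = a_offset * b_offset * k from the stored k,
// only the offsets and the dequantize scale need to be refreshed here.
void update_for_dynamic_quantization(arm_compute::cpu::kernels::CpuGemmLowpOffsetContributionKernel &kernel,
                                     int32_t new_a_offset, int32_t new_b_offset, float new_scale)
{
    kernel.set_a_offset(new_a_offset); // vector_sum_col must be passed to run_op if non-zero
    kernel.set_b_offset(new_b_offset); // vector_sum_row must be passed to run_op if non-zero
    kernel.set_scale(new_scale);       // only relevant when mm_result is F32 (already scaled)
}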
diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp
index d008842398..3c113f2828 100644
--- a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp
+++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2021, 2023 Arm Limited.
+ * Copyright (c) 2019-2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -919,7 +919,7 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::configure(const ITensorInfo
_a_offset = a_offset;
_b_offset = b_offset;
- _k_offset = a_offset * b_offset * k;
+ _k = k;
_output_stage = output_stage;
// If a_offset == 0, vector_sum_col can be a nullptr
@@ -958,6 +958,16 @@ Status CpuGemmLowpOffsetContributionOutputStageKernel::validate(const ITensorInf
return Status{};
}
+void CpuGemmLowpOffsetContributionOutputStageKernel::set_a_offset(int32_t a_offset)
+{
+ _a_offset = a_offset;
+}
+
+void CpuGemmLowpOffsetContributionOutputStageKernel::set_b_offset(int32_t b_offset)
+{
+ _b_offset = b_offset;
+}
+
void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &tensors,
const Window &window,
const ThreadInfo &info)
@@ -993,10 +1003,11 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &te
// Check if symmetric per-channel execution
const bool is_symm = _output_stage.is_quantized_per_channel;
+ auto k_offset = _a_offset * _b_offset * _k;
if (is_symm)
{
run_offset_contribution_output_stage_symm(window, mm_result, vector_sum_col, vector_sum_row, bias, dst,
- _a_offset, _b_offset, _k_offset, _is_vector_sum_col_batched,
+ _a_offset, _b_offset, k_offset, _is_vector_sum_col_batched,
_output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point);
}
else
@@ -1004,13 +1015,13 @@ void CpuGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &te
if (is_signed)
{
run_offset_contribution_output_stage<int8_t>(
- window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset,
+ window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, k_offset,
_is_vector_sum_col_batched, _output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point);
}
else
{
run_offset_contribution_output_stage<uint8_t>(
- window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, _k_offset,
+ window, mm_result, vector_sum_col, vector_sum_row, bias, dst, _a_offset, _b_offset, k_offset,
_is_vector_sum_col_batched, _output_stage, reinterpret_as_3d, is_bounded_relu, is_fixed_point);
}
}
diff --git a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h
index af477d4756..ff706ff3dc 100644
--- a/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h
+++ b/src/cpu/kernels/CpuGemmLowpOffsetContributionOutputStageKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2022 Arm Limited.
+ * Copyright (c) 2019-2022, 2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -21,8 +21,8 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H
-#define ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H
+#ifndef ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H
+#define ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H
#include "arm_compute/core/KernelDescriptors.h"
@@ -110,6 +110,22 @@ public:
int32_t b_offset,
GEMMLowpOutputStageInfo output_stage);
+ /** Set the a offset
+ * Warning: if a_offset is non-zero then vector_sum_col must be set in run_op.
+     * If unsure, run configure or validate again.
+ *
+ * @param[in] a_offset Offset to be added to each element of the matrix A.
+ */
+ void set_a_offset(int32_t a_offset);
+
+ /** Set the b offset
+     * Warning: if b_offset is non-zero then vector_sum_row must be set in run_op.
+     * If unsure, run configure or validate again.
+ *
+ * @param[in] b_offset Offset to be added to each element of the matrix B.
+ */
+ void set_b_offset(int32_t b_offset);
+
// Inherited methods overridden:
void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
const char *name() const override;
@@ -118,11 +134,11 @@ private:
/** Function to use for the particular tensors passed to configure() */
int32_t _a_offset{0};
int32_t _b_offset{0};
- int32_t _k_offset{0};
+ int32_t _k{0}; // Number of columns of A or rows of B, used in last offset term
bool _is_vector_sum_col_batched{true};
GEMMLowpOutputStageInfo _output_stage{GEMMLowpOutputStageInfo()};
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
-#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H */
+#endif // ACL_SRC_CPU_KERNELS_CPUGEMMLOWPOFFSETCONTRIBUTIONOUTPUTSTAGEKERNEL_H
diff --git a/src/cpu/kernels/assembly/arm_gemm.hpp b/src/cpu/kernels/assembly/arm_gemm.hpp
index 5d7cf79857..941fed0ba8 100644
--- a/src/cpu/kernels/assembly/arm_gemm.hpp
+++ b/src/cpu/kernels/assembly/arm_gemm.hpp
@@ -260,6 +260,19 @@ public:
}
};
+struct DequantizeFloat
+{
+public:
+ float scale = 0;
+
+ DequantizeFloat() = default;
+
+ // Constructor
+ DequantizeFloat(const float scale) : scale(scale)
+ {
+ }
+};
+
struct Nothing
{
};
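A minimal sketch of what consuming this output stage amounts to on the s8->f32 path: the existing s8 * s8 -> s32 accumulators are converted to float and multiplied by the scale (set to a_scale * b_scale by the dispatch code further down). The helper below is illustrative only; the real merge code lives in gemm_interleaved and the SME s8qfp32 kernels:

#include <cstdint>

// Illustrative only: applying DequantizeFloat to a single int32 accumulator.
// A float bias term may also be added on this path (bias addition is allowed
// for F32 output, see the validate() change in CpuGemmLowpMatrixMultiplyCore).
inline float apply_dequantize_float(int32_t acc, float scale, float bias = 0.0f)
{
    return scale * static_cast<float>(acc) + bias;
}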
diff --git a/src/cpu/kernels/assembly/gemm_common.hpp b/src/cpu/kernels/assembly/gemm_common.hpp
index 4825814e31..45d1e43274 100644
--- a/src/cpu/kernels/assembly/gemm_common.hpp
+++ b/src/cpu/kernels/assembly/gemm_common.hpp
@@ -166,6 +166,12 @@ public:
{
}
+    /*** Dequantize scale interface (optional) ***/
+    /* Set the dequantize scale for GEMMs converting from int to float (float_out = scale * float(int_out)) */
+ virtual void set_dequantize_scale(const float)
+ {
+ }
+
/*** Introspection interface ***/
/* Get the configuration of this GEMM */
virtual GemmConfig get_config() = 0;
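The empty body above is a deliberate no-op default, so GEMMs without a dequantize path can simply ignore the call. Below is a self-contained sketch of the override pattern with hypothetical class and member names (the real override lives in gemm_interleaved):

#include <cstdio>

// Simplified stand-in for the hook added above (illustration only, not the ACL class).
struct GemmBaseSketch
{
    virtual ~GemmBaseSketch() = default;
    virtual void set_dequantize_scale(const float) {} // default: ignore the scale
};

// A GEMM supporting int -> float output stores the scale and applies it when
// merging the int32 accumulators into the float destination.
struct InterleavedGemmSketch : GemmBaseSketch
{
    float dq_scale = 1.0f; // hypothetical member
    void set_dequantize_scale(const float qscale) override { dq_scale = qscale; }
};

int main()
{
    InterleavedGemmSketch gemm;
    gemm.set_dequantize_scale(0.02f * 0.1f); // a_scale * b_scale
    std::printf("dequantize scale = %g\n", gemm.dq_scale);
    return 0;
}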
diff --git a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
index 52d2f17dbf..f3396fbb5c 100644
--- a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
+++ b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.cpp
@@ -128,6 +128,11 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
_reshape_b_only_on_first_run;
_gemm_info = gemm_info;
+    // The offset kernel is needed if the offset is non-zero or if it may change (i.e. it is dynamic).
+    // It is not needed if the data type is symmetric, because then there is no offset.
+ bool a_offset_kernel_needed = _a_offset != 0 || a->quantization_info().is_dynamic();
+ bool b_offset_kernel_needed = _b_offset != 0 || b->quantization_info().is_dynamic();
+
_asm_glue = std::make_unique<cpu::CpuGemmAssemblyDispatch>();
const ITensorInfo *a_to_use = a;
@@ -229,8 +234,7 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
// Build reduction info
const GEMMLowpReductionKernelInfo reduction_info(a_to_use->dimension(0), false, 0, false);
- // Initialize matrix B reduction kernel only if _a_offset is not equal to 0
- if (_a_offset != 0)
+ if (a_offset_kernel_needed)
{
_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
@@ -239,8 +243,7 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
_mtx_b_reduction_kernel->configure(b, &_vector_sum_col, reduction_info);
}
- // Initialize Matrix A reduction kernel only if _b_offset is not equal to 0
- if (_b_offset != 0)
+ if (b_offset_kernel_needed)
{
_vector_sum_row = TensorInfo(compute_reductionB_shape(*a_to_use), 1, DataType::S32);
@@ -261,8 +264,8 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
_offset_contribution_output_stage_kernel =
std::make_unique<kernels::CpuGemmLowpOffsetContributionOutputStageKernel>();
_offset_contribution_output_stage_kernel->configure(
- &_mm_result_s32, _a_offset == 0 ? nullptr : &_vector_sum_col,
- _b_offset == 0 ? nullptr : &_vector_sum_row, c, _flip_signedness ? &_signed_output : dst,
+ &_mm_result_s32, a_offset_kernel_needed ? &_vector_sum_col : nullptr,
+ b_offset_kernel_needed ? &_vector_sum_row : nullptr, c, _flip_signedness ? &_signed_output : dst,
a->dimension(0), _a_offset, _b_offset, info.gemmlowp_output_stage());
if (_flip_signedness)
@@ -273,6 +276,11 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
}
else
{
+ // This scale is needed for the s8_f32 kernel where the multiplication output is dequantized to F32.
+ const float dequantize_scale =
+ (dst->data_type() == DataType::F32)
+ ? a->quantization_info().uniform().scale * b->quantization_info().uniform().scale
+ : 1.0f;
// Configure matrix multiply kernel
if (!_assembly_path)
{
@@ -281,9 +289,9 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
}
// Configure offset contribution kernel
_offset_contribution_kernel = std::make_unique<kernels::CpuGemmLowpOffsetContributionKernel>();
- _offset_contribution_kernel->configure(dst, _a_offset == 0 ? nullptr : &_vector_sum_col,
- _b_offset == 0 ? nullptr : &_vector_sum_row, a_to_use->dimension(0),
- _a_offset, _b_offset);
+ _offset_contribution_kernel->configure(dst, a_offset_kernel_needed ? &_vector_sum_col : nullptr,
+ b_offset_kernel_needed ? &_vector_sum_row : nullptr,
+ a_to_use->dimension(0), _a_offset, _b_offset, dequantize_scale);
}
}
// Configure activation
@@ -306,11 +314,11 @@ void CpuGemmLowpMatrixMultiplyCore::configure(
}
// Request memory for LHS and RHS reshape matrix
- _aux_mem[VectorSumCol] =
- MemoryInfo(offset_int_vec(VectorSumCol),
- !_fused_assembly_path && _a_offset != 0 && _reshape_b_only_on_first_run ? MemoryLifetime::Persistent
- : MemoryLifetime::Temporary,
- _vector_sum_col.total_size());
+ _aux_mem[VectorSumCol] = MemoryInfo(offset_int_vec(VectorSumCol),
+ !_fused_assembly_path && a_offset_kernel_needed && _reshape_b_only_on_first_run
+ ? MemoryLifetime::Persistent
+ : MemoryLifetime::Temporary,
+ _vector_sum_col.total_size());
_aux_mem[VectorSumRow] =
MemoryInfo(offset_int_vec(VectorSumRow), MemoryLifetime::Temporary, _vector_sum_row.total_size());
_aux_mem[TmpA] = MemoryInfo(offset_int_vec(TmpA), MemoryLifetime::Temporary, _tmp_a.total_size());
@@ -334,8 +342,8 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(b, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED,
DataType::QSYMM8, DataType::QSYMM8_PER_CHANNEL);
ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32, DataType::QASYMM8,
- DataType::QASYMM8_SIGNED);
- ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr &&
+ DataType::QASYMM8_SIGNED, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(c != nullptr && output->data_type() != DataType::F32 &&
gemm_info.gemmlowp_output_stage().type == GEMMLowpOutputStageType::NONE,
"Bias addition not supported in NEGEMMLowpMatrixMultiplyCore for output S32");
ARM_COMPUTE_RETURN_ERROR_ON_MSG(
@@ -367,6 +375,10 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
int32_t a_offset = a->quantization_info().uniform().offset;
int32_t b_offset = b->quantization_info().uniform().offset;
+    // The offset kernel is needed if the offset is non-zero or if it may change (i.e. it is dynamic).
+ bool a_offset_kernel_needed = a_offset != 0 || a->quantization_info().is_dynamic();
+ bool b_offset_kernel_needed = b_offset != 0 || b->quantization_info().is_dynamic();
+
bool fuse_output_stage = info.gemmlowp_output_stage().type != GEMMLowpOutputStageType::NONE;
if (fuse_output_stage)
{
@@ -489,7 +501,7 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
const GEMMLowpReductionKernelInfo reduction_info(a_to_use->dimension(0), false, 0, false);
// Validate matrix B reduction kernel only if _a_offset is not equal to 0
- if (a_offset != 0)
+ if (a_offset_kernel_needed)
{
info_vector_sum_col = TensorInfo(compute_reductionA_shape(*b), 1, DataType::S32);
@@ -499,7 +511,7 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
}
// Validate Matrix A reduction kernel only if _b_offset is not equal to 0
- if (b_offset != 0)
+ if (b_offset_kernel_needed)
{
info_vector_sum_row = TensorInfo(compute_reductionB_shape(*a), 1, DataType::S32);
@@ -525,9 +537,9 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
// Validate offset contribution kernel
ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpOffsetContributionOutputStageKernel::validate(
- &mm_result_s32_info, a_offset == 0 ? nullptr : &info_vector_sum_col,
- b_offset == 0 ? nullptr : &info_vector_sum_row, c, flip_signedness ? &signed_output : output, a_offset,
- b_offset, info.gemmlowp_output_stage()));
+ &mm_result_s32_info, a_offset_kernel_needed ? &info_vector_sum_col : nullptr,
+ b_offset_kernel_needed ? &info_vector_sum_row : nullptr, c, flip_signedness ? &signed_output : output,
+ a_offset, b_offset, info.gemmlowp_output_stage()));
}
else
{
@@ -545,8 +557,8 @@ Status CpuGemmLowpMatrixMultiplyCore::validate(const ITensorInfo *a,
}
// Validate offset contribution kernel
ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuGemmLowpOffsetContributionKernel::validate(
- output, a_offset == 0 ? nullptr : &info_vector_sum_col, b_offset == 0 ? nullptr : &info_vector_sum_row,
- a_offset, b_offset));
+ output, a_offset_kernel_needed ? &info_vector_sum_col : nullptr,
+ b_offset_kernel_needed ? &info_vector_sum_row : nullptr, a_offset, b_offset));
}
}
@@ -580,6 +592,14 @@ void CpuGemmLowpMatrixMultiplyCore::run(ITensorPack &tensors)
CpuAuxTensorHandler signed_a(offset_int_vec(SignedA), _signed_a, tensors, false);
CpuAuxTensorHandler signed_output(offset_int_vec(SignedOutput), _signed_output, tensors, false);
+ const QuantizationInfo a_qinfo = a->info()->quantization_info();
+ const QuantizationInfo b_qinfo = b->info()->quantization_info();
+
+ if (a_qinfo.is_dynamic())
+ _a_offset = a_qinfo.uniform().offset;
+ if (b_qinfo.is_dynamic())
+ _b_offset = b_qinfo.uniform().offset;
+
// Convert QASYMM8->QASYMM8_SIGNED
if (_flip_signedness)
{
@@ -662,6 +682,11 @@ void CpuGemmLowpMatrixMultiplyCore::run(ITensorPack &tensors)
if (_fuse_output_stage)
{
+ if (a_qinfo.is_dynamic())
+ _offset_contribution_output_stage_kernel->set_a_offset(_a_offset);
+ if (b_qinfo.is_dynamic())
+ _offset_contribution_output_stage_kernel->set_b_offset(_b_offset);
+
ITensorPack pack;
pack.add_tensor(TensorType::ACL_SRC_0, mm_result_s32.get());
pack.add_tensor(TensorType::ACL_SRC_1, _a_offset == 0 ? nullptr : vector_sum_col.get());
@@ -675,6 +700,16 @@ void CpuGemmLowpMatrixMultiplyCore::run(ITensorPack &tensors)
}
else
{
+ if (a_qinfo.is_dynamic())
+ _offset_contribution_kernel->set_a_offset(_a_offset);
+ if (b_qinfo.is_dynamic())
+ _offset_contribution_kernel->set_b_offset(_b_offset);
+ if (a_qinfo.is_dynamic() || b_qinfo.is_dynamic())
+ {
+ const float dequantize_scale = a_qinfo.uniform().scale * b_qinfo.uniform().scale;
+ _offset_contribution_kernel->set_scale(dequantize_scale);
+ }
+
ITensorPack pack;
pack.add_tensor(TensorType::ACL_SRC_0, _a_offset == 0 ? nullptr : vector_sum_col.get());
pack.add_tensor(TensorType::ACL_SRC_1, _b_offset == 0 ? nullptr : vector_sum_row.get());
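Taken together, the run-time updates above enable the usage pattern below: mark the inputs' QuantizationInfo as dynamic at configure time, then change scales and offsets between runs without reconfiguring. This is a hedged sketch; the three-argument QuantizationInfo constructor carrying the dynamic flag is assumed from this patch series, and tensor initialisation/allocation is omitted:

#include "arm_compute/runtime/NEON/functions/NEGEMMLowpMatrixMultiplyCore.h"
#include "arm_compute/runtime/Tensor.h"

using namespace arm_compute;

// Sketch: a, b, dst are assumed to be already initialised and allocated
// (QASYMM8_SIGNED inputs, S32 or F32 output).
void gemmlowp_dynamic_qinfo_sketch(Tensor &a, Tensor &b, Tensor &dst)
{
    // Mark the inputs as dynamically quantized before configure().
    a.info()->set_quantization_info(QuantizationInfo(0.5f, 10, /* is_dynamic */ true));
    b.info()->set_quantization_info(QuantizationInfo(0.25f, -3, /* is_dynamic */ true));

    NEGEMMLowpMatrixMultiplyCore gemm;
    gemm.configure(&a, &b, nullptr, &dst);
    gemm.run();

    // New scales/offsets for the next run: no reconfiguration needed, the operator
    // re-reads them and updates the offset-contribution and dequantize kernels.
    a.info()->set_quantization_info(QuantizationInfo(0.4f, 7, /* is_dynamic */ true));
    b.info()->set_quantization_info(QuantizationInfo(0.3f, 0, /* is_dynamic */ true));
    gemm.run();
}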
diff --git a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
index 78065a8953..38121c9bb4 100644
--- a/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
+++ b/src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2021, 2023 Arm Limited.
+ * Copyright (c) 2021, 2023-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -92,6 +92,7 @@ public:
* |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |S32 |
* |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |S32 |
* |QASYMM8_SIGNED |QSYMM8 |S32 |S32 |
+ * |QASYMM8_SIGNED |QASYMM8_SIGNED |F32 |F32 |
*
* @note GEMM_LOWP: low precision GEMM kernel
* This kernel performs the following computations:
@@ -100,12 +101,12 @@ public:
* -# Convert b values from QASYMM8 to int32 add b_offset to each of them.
* -# Compute the matrix product of the resulting a * b in int32.
*
- * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
+ * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED/F32 otherwise
*
* @param[in] a First input tensor info (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
* @param[in] b Second input tensor info (Matrix B). Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL.
- * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32
- * @param[out] dst Output tensor info. Data type supported: Data type supported: S32/QASYMM8/QASYMM8_SIGNED
+ * @param[in] c Third input tensor info (Matrix C). It can be a nullptr. Data type supported: S32/F32
+     * @param[out] dst       Output tensor info. Data type supported: S32/QASYMM8/QASYMM8_SIGNED/F32
* @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
* if the reshape of matrix B should be executed only for the first run
*/
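For the new F32 row in the table above, a reference model of what the dequantized output is expected to be, using the usual uniform dequantization r = scale * (q - offset). This is a sketch for clarity, not the library's validation fixture:

#include <cstddef>
#include <cstdint>
#include <vector>

// Reference model (sketch) of the QASYMM8_SIGNED x QASYMM8_SIGNED -> F32 path:
// dst = a_scale * b_scale * sum_k (a_q - a_offset) * (b_q - b_offset)
std::vector<float> s8s8_f32_gemm_ref(const std::vector<int8_t> &a, // M x K, row-major
                                     const std::vector<int8_t> &b, // K x N, row-major
                                     int M, int N, int K,
                                     float a_scale, int32_t a_offset,
                                     float b_scale, int32_t b_offset)
{
    std::vector<float> dst(static_cast<std::size_t>(M) * N, 0.0f);
    const float dequantize_scale = a_scale * b_scale; // what the operator feeds to DequantizeFloat
    for (int m = 0; m < M; ++m)
    {
        for (int n = 0; n < N; ++n)
        {
            int32_t acc = 0;
            for (int k = 0; k < K; ++k)
            {
                acc += (static_cast<int32_t>(a[m * K + k]) - a_offset) *
                       (static_cast<int32_t>(b[k * N + n]) - b_offset);
            }
            dst[m * N + n] = dequantize_scale * static_cast<float>(acc);
        }
    }
    return dst;
}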
diff --git a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
index 01a74a5a56..7d85885654 100644
--- a/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
+++ b/src/cpu/operators/internal/CpuGemmAssemblyDispatch.cpp
@@ -540,6 +540,13 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::configure(const ITensorInfo *
{
configure_indirect(a, b, d, gemm_info);
}
+
+ if (std::is_same<OutputStage, arm_gemm::DequantizeFloat>::value)
+ {
+ // Output dequantization is just the two src scales multiplied together
+ _gemm_kernel_asm->set_dequantize_scale(a->quantization_info().uniform().scale *
+ b->quantization_info().uniform().scale);
+ }
}
template <typename TypeInput, typename TypeOutput, class OutputStage>
@@ -630,6 +637,15 @@ void Fallback<TypeInput, TypeOutput, OutputStage>::run(ITensorPack &tensors)
auto d = tensors.get_tensor(TensorType::ACL_DST);
ARM_COMPUTE_ERROR_ON_NULLPTR(a, d);
+ // Only update at runtime if the src quantization is dynamic
+ if (std::is_same<OutputStage, arm_gemm::DequantizeFloat>::value &&
+ (a->info()->quantization_info().is_dynamic() || b->info()->quantization_info().is_dynamic()))
+ {
+ // Output dequantization is just the two src scales multiplied together
+ _gemm_kernel_asm->set_dequantize_scale(a->info()->quantization_info().uniform().scale *
+ b->info()->quantization_info().uniform().scale);
+ }
+
int lda = a->info()->strides_in_bytes().y() / a->info()->element_size();
int ldb = 0;
const int ldd = d->info()->strides_in_bytes().y() / d->info()->element_size();
@@ -784,6 +800,39 @@ void create_arm_gemm(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_ge
}
template <typename TypeInput, typename TypeOutput>
+void create_arm_gemm_dequant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
+ const ITensorInfo *a,
+ const ITensorInfo *b,
+ const ITensorInfo *c,
+ ITensorInfo *d,
+ arm_gemm::Activation activation,
+ const AsmGemmInfo &info)
+{
+ ARM_COMPUTE_UNUSED(activation);
+
+ Params p = extract_parameters(a, b, d, info);
+ const CPUInfo &ci = NEScheduler::get().cpu_info();
+ const unsigned int num_threads = NEScheduler::get().num_threads();
+
+ arm_gemm::GemmConfig cfg;
+ cfg.weight_format = assembly_utils::map_to_arm_gemm_weight_format(info.weight_format);
+ arm_gemm::GemmArgs args(&ci, p.M, p.N, p.K, p.sections, p.batches, p.multis, p.indirect, activation, num_threads,
+ info.fixed_format, info.fast_mode, info.accumulate, &cfg);
+
+ // Create arm_gemm fallback
+ auto fallback = std::make_unique<Fallback<TypeInput, TypeOutput, arm_gemm::DequantizeFloat>>();
+
+    // Configure dequantization info
+ const GEMMLowpOutputStageInfo os_info = info.output_stage;
+
+ arm_gemm::DequantizeFloat gemm_dequant_info{};
+ gemm_dequant_info = arm_gemm::DequantizeFloat(d->quantization_info().uniform().scale);
+
+ fallback->configure(a, b, c, d, args, info, gemm_dequant_info);
+ arm_gemm = std::move(fallback);
+}
+
+template <typename TypeInput, typename TypeOutput>
void create_arm_gemm_quant(std::unique_ptr<CpuGemmAssemblyDispatch::IFallback> &arm_gemm,
const ITensorInfo *a,
const ITensorInfo *b,
@@ -1031,6 +1080,10 @@ void CpuGemmAssemblyDispatch::configure(
{
create_arm_gemm<int8_t, int32_t>(_arm_gemm, a, b, c, d, act, info);
}
+ else if (d->data_type() == DataType::F32)
+ {
+ create_arm_gemm_dequant<int8_t, float>(_arm_gemm, a, b, c, d, act, info);
+ }
else
{
create_arm_gemm_quant<int8_t, int8_t>(_arm_gemm, a, b, c, d, act, info);