Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp')
 src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 393 ++++++++++++++++++--------
 1 file changed, 300 insertions(+), 93 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 8bef2b7bae..0c1d3a387b 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 ARM Limited.
+ * Copyright (c) 2017-2024 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -24,153 +24,359 @@
#include "arm_gemm.hpp"
#include "gemm_common.hpp"
#include "gemm_hybrid.hpp"
+#include "gemm_hybrid_indirect.hpp"
#include "gemm_implementation.hpp"
#include "gemm_interleaved.hpp"
-#include "gemm_interleaved_2d.hpp"
-#include "gemm_interleaved_pretransposed_2d.hpp"
-#include "gemm_native.hpp"
#include "gemv_batched.hpp"
-#include "gemv_native_transposed.hpp"
#include "gemv_pretransposed.hpp"
#include "kernels/a32_sgemm_8x6.hpp"
-#include "kernels/a64_hybrid_fp32_mla_16x4.hpp"
-#include "kernels/a64_hybrid_fp32_mla_4x8.hpp"
-#include "kernels/a64_native_fp32_mla_16x4.hpp"
-#include "kernels/a64_smallK_hybrid_fp32_mla_4x6.hpp"
-#include "kernels/a64_smallK_hybrid_fp32_mla_4x8.hpp"
-#include "kernels/a64_sgemm_12x8.hpp"
-#include "kernels/a64_sgemv_pretransposed.hpp"
-#include "kernels/a64_sgemv_trans.hpp"
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_ffhybrid_fp32_mla_6x16.hpp"
+#include "kernels/a64_ffhybrid_fp32bf16fp32_mmla_4x24.hpp"
+#include "kernels/a64_ffhybrid_fp32bf16fp32_mmla_6x16.hpp"
+#include "kernels/a64_ffinterleaved_bf16fp32_mmla_8x12.hpp"
+#include "kernels/a64_ffinterleaved_fp32_mla_8x12.hpp"
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/a64_hybrid_fp32bf16fp32_mmla_4x24.hpp"
+#include "kernels/a64_hybrid_fp32bf16fp32_mmla_6x16.hpp"
+#include "kernels/a64_hybrid_fp32_mla_4x24.hpp"
+#include "kernels/a64_hybrid_fp32_mla_6x16.hpp"
+#include "kernels/a64_hybrid_fp32_mla_8x4.hpp"
+#include "kernels/a64_interleaved_bf16fp32_mmla_8x12.hpp"
+#include "kernels/a64_sgemm_8x12.hpp"
+#include "kernels/a64_sgemm_8x6.hpp"
+#include "kernels/a64_smallK_hybrid_fp32_mla_6x4.hpp"
+#include "kernels/a64_smallK_hybrid_fp32_mla_8x4.hpp"
-#include "kernels/sve_hybrid_fp32_mla_4VLx4.hpp"
-#include "kernels/sve_interleaved_fp32_mla_3VLx8.hpp"
-#include "kernels/sve_native_fp32_mla_4VLx4.hpp"
-#include "kernels/sve_smallK_hybrid_fp32_mla_1VLx8.hpp"
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#include "kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp"
+#include "kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp"
+#include "kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_SME2
+#include "kernels/sme2_gemv_fp32_mla_16VL.hpp"
+#include "kernels/sme2_gemv_fp32bf16fp32_dot_16VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_fp32_mopa_4VLx1VL.hpp"
+#include "kernels/sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SME2
+
+#include "kernels/sve_ffhybrid_fp32_mla_6x4VL.hpp"
+#include "kernels/sve_ffhybrid_fp32bf16fp32_mmla_4x6VL.hpp"
+#include "kernels/sve_ffinterleaved_fp32_mla_8x3VL.hpp"
+#include "kernels/sve_ffinterleaved_bf16fp32_mmla_8x3VL.hpp"
+#include "kernels/sve_hybrid_fp32bf16fp32_mmla_4x6VL.hpp"
+#include "kernels/sve_hybrid_fp32bf16fp32_mmla_6x4VL.hpp"
+#include "kernels/sve_hybrid_fp32_mla_6x4VL.hpp"
+#include "kernels/sve_hybrid_fp32_mla_8x1VL.hpp"
+#include "kernels/sve_interleaved_bf16fp32_mmla_8x3VL.hpp"
+#include "kernels/sve_interleaved_fp32_mla_8x3VL.hpp"
+#include "kernels/sve_interleaved_fp32_mmla_8x3VL.hpp"
+#endif // ARM_COMPUTE_ENABLE_SVE
namespace arm_gemm {
static const GemmImplementation<float, float> gemm_fp32_methods[] =
{
+// GEMV cases - starting with 'gemv_batched' wrapper to turn batched GEMV into GEMM.
{
GemmMethod::GEMV_BATCHED,
"gemv_batched",
- [](const GemmArgs &args) { return (args._Msize==1) && (args._nbatches>1); },
+ [](const GemmArgs &args) { return args._Msize==1 && args._nbatches>1 && !args._indirect_input; },
nullptr,
[](const GemmArgs &args) { return new GemvBatched<float, float>(args); }
},
#ifdef __aarch64__
+#ifdef ARM_COMPUTE_ENABLE_BF16
+// "fast mode" (BF16) kernels
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_interleaved_bf16fp32_mmla_8x12",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>(args); }
+),
+
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_fp32bf16fp32_mmla_6x16",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_fp32bf16fp32_mmla_4x24",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_BF16
+#ifdef ARM_COMPUTE_ENABLE_SVE
+#ifdef ARM_COMPUTE_ENABLE_SME2
+// SME kernels
{
- GemmMethod::GEMV_PRETRANSPOSED,
- "sgemv_pretransposed",
- [](const GemmArgs &args) { return (args._Msize==1 && args._pretransposed_hint && args._nbatches==1); },
+ GemmMethod::GEMM_HYBRID,
+ "sme2_gemv_fp32bf16fp32_dot_16VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
nullptr,
- [](const GemmArgs &args) { return new GemvPretransposed<sgemv_pretransposed, float, float>(args); }
+ [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32bf16fp32_dot_16VL, float, float>(args); }
},
{
- GemmMethod::GEMV_NATIVE_TRANSPOSED,
- "sgemv_trans",
- [](const GemmArgs &args) { return (args._Msize==1 && !args._trA && !args._trB && args._nbatches==1); },
+ GemmMethod::GEMM_HYBRID,
+ "sme2_gemv_fp32_mla_16VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2() && args._Msize==1 && args._nbatches==1 && !args._indirect_input && !args._accumulate; },
nullptr,
- [](const GemmArgs &args) { return new GemvNativeTransposed<sgemv_trans, float, float>(args); }
+ [](const GemmArgs &args) { return new GemvPretransposed<cls_sme2_gemv_fp32_mla_16VL, float, float>(args); }
},
-
-#ifdef __ARM_FEATURE_SVE
-// SVE smallk / native / hybrid methods
+#ifdef ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_HYBRID,
- "smallK_hybrid_fp32_mla_1VLx8",
- [](const GemmArgs &args) { return (args._Ksize <= 24) && !args._trA && args._pretransposed_hint; },
- nullptr,
- [](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_1VLx8, float, float>(args); }
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_1VLx4VL, float, float>(args); }
},
+#endif // ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_HYBRID,
- "hybrid_fp32_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
- [](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_4VLx4, float, float>(args); }
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp32_mopa_1VLx4VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize >= 8*VL || args._Msize <= VL || (2*VL < args._Msize && args._Msize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_1VLx4VL, float, float>(args); }
},
+#ifdef ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_NATIVE,
- "native_fp32_mla_4VLx4",
- [](const GemmArgs &args) { return (args._Ksize>4 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmNative<native_fp32_mla_4VLx4, float, float>(args); }
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_4VLx1VL, float, float>(args); }
},
-#endif // __ARM_FEATURE_SVE
-
-// NEON native / hybrid methods
+#endif // ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_HYBRID,
- "smallK_hybrid_fp32_mla_4x8",
- [](const GemmArgs &args) { return (args._Ksize <= 8) && (args._Nsize % 4)==0 && !args._trA && args._pretransposed_hint; },
- nullptr,
- [](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_4x8, float, float>(args); }
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp32_mopa_4VLx1VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
+ [](const GemmArgs &args) { const auto VL = sme::get_vector_length<float>();
+ return args._Nsize <= VL || (2*VL < args._Nsize && args._Nsize <= 3*VL); },
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_4VLx1VL, float, float>(args); }
},
+#ifdef ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_HYBRID,
- "smallK_hybrid_fp32_mla_4x6",
- [](const GemmArgs &args) { return (args._Ksize > 8) && (args._Ksize <= 16) && (args._Nsize % 4)==0 && !args._trA && args._pretransposed_hint; },
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_sme2() && !args._accumulate; },
nullptr,
- [](const GemmArgs &args) { return new GemmHybrid<smallK_hybrid_fp32_mla_4x6, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_bf16fp32_mopa_2VLx2VL, float, float>(args); }
},
+#endif // ARM_COMPUTE_ENABLE_BF16
{
- GemmMethod::GEMM_HYBRID,
- "hybrid_fp32_mla_4x8_normal",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
- [](const GemmArgs &args) { return (args._Nsize < 12); },
- [](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_4x8, float, float>(args); }
+ GemmMethod::GEMM_INTERLEAVED,
+ "sme2_interleaved_nomerge_fp32_mopa_2VLx2VL",
+ [](const GemmArgs &args) { return args._ci->has_sme2() && !args._accumulate; },
+ nullptr,
+ [](const GemmArgs &args) { return new GemmInterleavedNoMerge<cls_sme2_interleaved_nomerge_fp32_mopa_2VLx2VL, float, float>(args); }
},
-{
+#endif // ARM_COMPUTE_ENABLE_SME2
+#ifdef ARM_COMPUTE_ENABLE_BF16
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_bf16fp32_mmla_8x3VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
- "hybrid_fp32_mla_16x4_normal",
- [](const GemmArgs &args) { return (args._Ksize >= 4) && !args._trA && args._pretransposed_hint; },
- [](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || (args._Msize < 16) || (args._nmulti > 1); },
- [](const GemmArgs &args) { return new GemmHybrid<hybrid_fp32_mla_16x4, float, float>(args); }
+ "sve_hybrid_fp32bf16fp32_mmla_6x4VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_hybrid_fp32bf16fp32_mmla_4x6VL",
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_BF16
+#ifdef ARM_COMPUTE_ENABLE_SVEF32MM
+// FP32 MMLA next due to higher throughput (the FP32 matrix-multiply extension is SVE-only).
+// Prefer this in all cases, except when fast mode is requested and BF16 is available.
+{
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_fp32_mmla_8x3VL",
+ [](const GemmArgs &args) { return args._ci->has_svef32mm() && (args._Ksize>4); },
+ [](const GemmArgs &args) { return !(args._fast_mode && args._ci->has_bf16()); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float>(args); }
},
+#endif // ARM_COMPUTE_ENABLE_SVEF32MM
+// SVE kernels
{
- GemmMethod::GEMM_NATIVE,
- "native_fp32_mla_16x4",
- [](const GemmArgs &args) { return (args._Ksize>4 && (args._Nsize % 16)==0 && !args._trA && !args._trB); },
- [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
- [](const GemmArgs &args) { return new GemmNative<native_fp32_mla_16x4, float, float>(args); }
+ GemmMethod::GEMM_HYBRID,
+ "sve_hybrid_fp32_mla_8x1VL",
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return (args._Nsize < 12); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float>(args); }
},
-
-#ifdef __ARM_FEATURE_SVE
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_hybrid_fp32_mla_6x4VL",
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_interleaved_fp32_mla_8x3VL",
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>(args); }
+),
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_BF16
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_bf16fp32_mmla_8x3VL",
+ KernelWeightFormat::VL2VL_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_ffhybrid_fp32bf16fp32_mmla_4x6VL",
+ KernelWeightFormat::VL2VL_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_BF16
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "sve_ffinterleaved_fp32_mla_8x3VL",
+ KernelWeightFormat::VL1VL_BL32,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "sve_ffhybrid_fp32_mla_6x4VL",
+ KernelWeightFormat::VL1VL_BL32,
+ [](const GemmArgs &args) { return args._ci->has_sve(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32_mla_6x4VL, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32_mla_6x4VL, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#endif // ARM_COMPUTE_ENABLE_SVE
+// Cortex-A35 specific kernel - use for any problem on A35, and never in any other cases.
{
GemmMethod::GEMM_INTERLEAVED,
- "interleaved_fp32_mla_3VLx8",
- [](const GemmArgs &args) { return (args._Ksize>4); },
+ "a64_sgemm_8x6",
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<interleaved_fp32_mla_3VLx8, float, float>(args); }
+ [](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A35; },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float>(args); }
},
-#endif // __ARM_FEATURE_SVE
-//Pretranpose, 2D split
+// Arm® Neon™ hybrid methods
{
- GemmMethod::GEMM_INTERLEAVED_2D,
- "sgemm_12x8_pretranspose_2d",
- [](const GemmArgs &args) { return args._pretransposed_hint; },
- [](const GemmArgs &args) { return args._maxthreads >= 8; },
- [](const GemmArgs &args) { return new GemmInterleavedPretransposed2d<sgemm_12x8, float, float>(args); }
+ GemmMethod::GEMM_HYBRID,
+ "a64_smallK_hybrid_fp32_mla_8x4",
+ [](const GemmArgs &args) { return args._Ksize <= 8 && (args._Nsize % 4)==0 && !args._indirect_input && !args._accumulate; },
+ nullptr,
+ [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_fp32_mla_8x4, float, float>(args); }
},
-//Tranpose, 2D split, no blockmanager
{
- GemmMethod::GEMM_INTERLEAVED_2D,
- "sgemm_12x8_2d",
+ GemmMethod::GEMM_HYBRID,
+ "a64_smallK_hybrid_fp32_mla_6x4",
+ [](const GemmArgs &args) { return (args._Ksize > 8 && args._Ksize <= 16) && (args._Nsize % 4)==0 && !args._indirect_input && !args._accumulate; },
nullptr,
- [](const GemmArgs &args) { return (!args._pretransposed_hint) && (args._maxthreads >= 8); },
- [](const GemmArgs &args) { return new GemmInterleaved2d<sgemm_12x8, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmHybrid<cls_a64_smallK_hybrid_fp32_mla_6x4, float, float>(args); }
},
-//Tranpose, 1D split, with blockmanager
{
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_fp32_mla_8x4",
+ nullptr,
+ [](const GemmArgs &args) { return (args._Nsize < 12); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float>(args); }
+},
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_fp32_mla_4x24",
+ nullptr,
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_hybrid_fp32_mla_6x16",
+ nullptr,
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_sgemm_8x12",
+ nullptr,
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float>(args); }
+),
+#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
+#ifdef ARM_COMPUTE_ENABLE_BF16
+// "fast mode" (BF16) kernels
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_INTERLEAVED,
+ "a64_ffinterleaved_bf16fp32_mmla_8x12",
+ KernelWeightFormat::VL256_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp32bf16fp32_mmla_4x24",
+ KernelWeightFormat::VL256_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp32bf16fp32_mmla_6x16",
+ KernelWeightFormat::VL256_BL64_BF16,
+ [](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_BF16
+GemmImplementation<float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
- "sgemm_12x8_1d",
+ "a64_ffinterleaved_fp32_mla_8x12",
+ KernelWeightFormat::VL128_BL32,
nullptr,
+ [](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>(args); }
+),
+GemmImplementation<float, float>::with_estimate(
+ GemmMethod::GEMM_HYBRID,
+ "a64_ffhybrid_fp32_mla_6x16",
+ KernelWeightFormat::VL128_BL32,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<sgemm_12x8, float, float>(args); }
-},
-
+ [](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32_mla_6x16, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32_mla_6x16, float, float>(args); }
+),
+#endif // ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#endif // __aarch64__
#ifdef __arm__
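
For context, each entry in the table above couples a GemmMethod tag and a kernel name with up to three lambdas: an "is supported" predicate, either an "is recommended" predicate or a cycle estimate (the with_estimate variants), and a factory. Below is a minimal stand-alone sketch of how such a table is walked, assuming behaviour along the lines of find_implementation() in gemm_implementation.hpp; Args, Impl, cost_of and pick_implementation are hypothetical stand-ins, not the library's real types.

// Sketch only: hypothetical simplified types, not arm_gemm's real definitions.
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

struct Args {                 // simplified stand-in for GemmArgs
    unsigned int M, N, K;
    bool fast_mode;
    bool cpu_has_bf16;
};

struct Impl {                 // simplified stand-in for GemmImplementation<float, float>
    std::string name;
    std::function<bool(const Args &)>     is_supported;    // null == always supported
    std::function<bool(const Args &)>     is_recommended;  // consulted when no estimate
    std::function<uint64_t(const Args &)> cycle_estimate;  // null == derive from the above
};

// Effective "cost" of a supported entry: a real estimate where one exists,
// otherwise 0 (accept on sight) if recommended, or worst-case if merely supported.
static uint64_t cost_of(const Impl &i, const Args &args) {
    if (i.cycle_estimate) return i.cycle_estimate(args);
    if (i.is_recommended && !i.is_recommended(args)) return UINT64_MAX;
    return 0;
}

// Walk the table in priority order: the first supported entry with zero cost is
// taken immediately; estimated entries compete, lowest predicted cycles winning.
const Impl *pick_implementation(const std::vector<Impl> &table, const Args &args) {
    const Impl *best = nullptr;
    uint64_t best_cycles = 0;
    for (const Impl &i : table) {
        if (i.is_supported && !i.is_supported(args)) continue;
        uint64_t cycles = cost_of(i, args);
        if (cycles == 0) return &i;
        if (best == nullptr || cycles < best_cycles) { best = &i; best_cycles = cycles; }
    }
    return best;
}

This ordering is why the BF16 "fast mode" and SME2 entries are listed before the generic SVE and Neon kernels: on cores without those features their predicates fail and selection falls through to the later rows.
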
@@ -199,6 +405,7 @@ const GemmImplementation<float, float> *gemm_implementation_list<float, float>()
/* Explicitly instantiate the external functions for these types. */
template UniqueGemmCommon<float, float> gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<float, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
template KernelDescription get_gemm_method<float, float, Nothing>(const GemmArgs &args, const Nothing &);
template std::vector<KernelDescription> get_compatible_kernels<float, float, Nothing> (const GemmArgs &args, const Nothing &);
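
As a usage sketch of the entry points instantiated above (GemmArgs construction is version-specific and assumed to have been done by the caller; the function name report_and_build is hypothetical):

#include <vector>

#include "arm_gemm.hpp"

void report_and_build(const arm_gemm::GemmArgs &args) {
    using namespace arm_gemm;

    // Name and method of the entry the fp32 table selects for these args,
    // e.g. "a64_hybrid_fp32_mla_6x16" on a plain AArch64 core.
    KernelDescription kd = get_gemm_method<float, float, Nothing>(args, Nothing());

    // Every entry whose "is supported" predicate passes, in priority order.
    std::vector<KernelDescription> candidates = get_compatible_kernels<float, float, Nothing>(args, Nothing());

    // Instantiate the winning implementation behind the common GEMM interface.
    UniqueGemmCommon<float, float> gemm_fn = gemm<float, float, Nothing>(args, Nothing());

    (void)kd; (void)candidates; (void)gemm_fn;
}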