about summary refs log tree commit diff
path: root/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp  109
1 file changed, 55 insertions, 54 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index 0c1d3a387b..5da7161671 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -82,7 +82,7 @@
namespace arm_gemm {
-static const GemmImplementation<float, float> gemm_fp32_methods[] =
+static const GemmImplementation<float, float, float> gemm_fp32_methods[] =
{
// GEMV cases - starting with 'gemv_batched' wrapper to turn batched GEMV into GEMM.
{
@@ -95,27 +95,27 @@ static const GemmImplementation<float, float> gemm_fp32_methods[] =
#ifdef __aarch64__
#ifdef ARM_COMPUTE_ENABLE_BF16
// "fast mode" (BF16) kernels
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_interleaved_bf16fp32_mmla_8x12",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_interleaved_bf16fp32_mmla_8x12, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32bf16fp32_mmla_6x16",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_6x16, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32bf16fp32_mmla_4x24",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_bf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32bf16fp32_mmla_4x24, float, float, float>(args); }
),
#endif // ARM_COMPUTE_ENABLE_BF16
#ifdef ARM_COMPUTE_ENABLE_SVE
@@ -189,26 +189,26 @@ GemmImplementation<float, float>::with_estimate(
},
#endif // ARM_COMPUTE_ENABLE_SME2
#ifdef ARM_COMPUTE_ENABLE_BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_bf16fp32_mmla_8x3VL",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_bf16fp32_mmla_8x3VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32bf16fp32_mmla_6x4VL",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_6x4VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32bf16fp32_mmla_4x6VL",
[](const GemmArgs &args) { return args._fast_mode && args._ci->has_svebf16(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32bf16fp32_mmla_4x6VL, float, float, float>(args); }
),
#endif // ARM_COMPUTE_ENABLE_BF16
#ifdef ARM_COMPUTE_ENABLE_SVEF32MM
@@ -218,8 +218,8 @@ GemmImplementation<float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_fp32_mmla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_svef32mm() && (args._Ksize>4); },
- [](const GemmArgs &args) { return !(args._fast_mode && args._ci->has_bf16()); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return !(args._fast_mode && args._ci->has_svebf16()); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mmla_8x3VL, float, float, float>(args); }
},
#endif // ARM_COMPUTE_ENABLE_SVEF32MM
// SVE kernels
@@ -228,25 +228,25 @@ GemmImplementation<float, float>::with_estimate(
"sve_hybrid_fp32_mla_8x1VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
[](const GemmArgs &args) { return (args._Nsize < 12); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_8x1VL, float, float, float>(args); }
},
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_hybrid_fp32_mla_6x4VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp32_mla_6x4VL, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_interleaved_fp32_mla_8x3VL",
[](const GemmArgs &args) { return args._ci->has_sve(); },
- [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp32_mla_8x3VL, float, float, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#ifdef ARM_COMPUTE_ENABLE_BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_bf16fp32_mmla_8x3VL",
KernelWeightFormat::VL2VL_BL64_BF16,
@@ -254,7 +254,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_bf16fp32_mmla_8x3VL, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_ffhybrid_fp32bf16fp32_mmla_4x6VL",
KernelWeightFormat::VL2VL_BL64_BF16,
@@ -263,7 +263,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_sve_ffhybrid_fp32bf16fp32_mmla_4x6VL, float, float>(args); }
),
#endif
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"sve_ffinterleaved_fp32_mla_8x3VL",
KernelWeightFormat::VL1VL_BL32,
@@ -271,7 +271,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_sve_ffinterleaved_fp32_mla_8x3VL, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"sve_ffhybrid_fp32_mla_6x4VL",
KernelWeightFormat::VL1VL_BL32,
@@ -287,7 +287,7 @@ GemmImplementation<float, float>::with_estimate(
"a64_sgemm_8x6",
nullptr,
[](const GemmArgs &args) { return args._ci->get_cpu_model() == CPUModel::A35; },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x6, float, float, float>(args); }
},
// Arm® Neon™ hybrid methods
{
@@ -309,33 +309,33 @@ GemmImplementation<float, float>::with_estimate(
"a64_hybrid_fp32_mla_8x4",
nullptr,
[](const GemmArgs &args) { return (args._Nsize < 12); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_8x4, float, float, float>(args); }
},
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32_mla_4x24",
nullptr,
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_4x24, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_hybrid_fp32_mla_6x16",
nullptr,
- [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float>(args); }
+ [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp32_mla_6x16, float, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_sgemm_8x12",
nullptr,
- [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float>::estimate_cycles<float>(args); },
- [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float>(args); }
+ [](const GemmArgs &args) { return GemmInterleaved<cls_a64_sgemm_8x12, float, float, float>::estimate_cycles<float>(args); },
+ [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_sgemm_8x12, float, float, float>(args); }
),
#ifdef ARM_COMPUTE_ENABLE_FIXED_FORMAT_KERNELS
#ifdef ARM_COMPUTE_ENABLE_BF16
// "fast mode" (BF16) kernels
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_bf16fp32_mmla_8x12",
KernelWeightFormat::VL256_BL64_BF16,
@@ -343,7 +343,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_bf16fp32_mmla_8x12, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32bf16fp32_mmla_4x24",
KernelWeightFormat::VL256_BL64_BF16,
@@ -351,7 +351,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_4x24, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32bf16fp32_mmla_6x16",
KernelWeightFormat::VL256_BL64_BF16,
@@ -359,8 +359,9 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmHybridIndirectFixedFormat<cls_a64_ffhybrid_fp32bf16fp32_mmla_6x16, float, float>(args); }
),
+
#endif // BF16
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_INTERLEAVED,
"a64_ffinterleaved_fp32_mla_8x12",
KernelWeightFormat::VL128_BL32,
@@ -368,7 +369,7 @@ GemmImplementation<float, float>::with_estimate(
[](const GemmArgs &args) { return GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>::estimate_cycles<float>(args); },
[](const GemmArgs &args) { return new GemmInterleavedFixedFormat<cls_a64_ffinterleaved_fp32_mla_8x12, float, float>(args); }
),
-GemmImplementation<float, float>::with_estimate(
+GemmImplementation<float, float, float>::with_estimate(
GemmMethod::GEMM_HYBRID,
"a64_ffhybrid_fp32_mla_6x16",
KernelWeightFormat::VL128_BL32,
@@ -385,7 +386,7 @@ GemmImplementation<float, float>::with_estimate(
"sgemm_8x6",
nullptr,
nullptr,
- [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, float, float>(args); }
+ [](const GemmArgs &args) { return new GemmInterleaved<sgemm_8x6, float, float, float>(args); }
},
#endif // __arm__
{
@@ -399,14 +400,14 @@ GemmImplementation<float, float>::with_estimate(
/* Templated function to return this list. */
template<>
-const GemmImplementation<float, float> *gemm_implementation_list<float, float>() {
+const GemmImplementation<float, float, float> *gemm_implementation_list<float, float, float>() {
return gemm_fp32_methods;
}
/* Explicitly instantiate the external functions for these types. */
-template UniqueGemmCommon<float, float> gemm<float, float, Nothing>(const GemmArgs &args, const Nothing &);
-template bool has_opt_gemm<float, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
-template KernelDescription get_gemm_method<float, float, Nothing>(const GemmArgs &args, const Nothing &);
-template std::vector<KernelDescription> get_compatible_kernels<float, float, Nothing> (const GemmArgs &args, const Nothing &);
+template UniqueGemmCommon<float, float, float> gemm<float, float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template bool has_opt_gemm<float, float, float, Nothing>(WeightFormat &weight_format, const GemmArgs &args, const Nothing &);
+template KernelDescription get_gemm_method<float, float, float, Nothing>(const GemmArgs &args, const Nothing &);
+template std::vector<KernelDescription> get_compatible_kernels<float, float, float, Nothing> (const GemmArgs &args, const Nothing &);
} // namespace arm_gemm