Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp')
-rw-r--r--  src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp  39
1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index b41d8dd097..01976132ed 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -23,7 +23,7 @@
  */
 
 // This can only be built if the target/compiler supports FP16 arguments.
-#ifdef __ARM_FP16_ARGS
+#if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
 
 #include "arm_gemm.hpp"
@@ -43,48 +43,37 @@
 namespace arm_gemm {
 
 static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
-#if defined(ARM_COMPUTE_ENABLE_SVE)
-{
+#ifdef ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<__fp16, __fp16>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_fp16_mla_6x4VL",
     [](const GemmArgs &args) { return args._ci->has_sve(); },
-    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8))); },
-    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>(args); }
-},
-{
+    [](const GemmArgs &args) { return GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
+    [](const GemmArgs &args) { return new GemmHybridIndirect<cls_sve_hybrid_fp16_mla_6x4VL, __fp16, __fp16>(args); }
+),
+GemmImplementation<__fp16, __fp16>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_fp16_mla_8x3VL",
     [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize > 4); },
-    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args) { return GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
     [](const GemmArgs &args) { return new GemmInterleaved<cls_sve_interleaved_fp16_mla_8x3VL, __fp16, __fp16>(args); }
-},
-#endif
-
-#if defined(__aarch64__) && (defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) || defined(FP16_KERNELS))
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+#if defined(__aarch64__)
 GemmImplementation<__fp16, __fp16>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_fp16_mla_6x32",
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
     [](const GemmArgs &args) { return args._ci->has_fp16(); },
-#else
-    nullptr,
-#endif
-    [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>::estimate_cycles(args, cls_a64_hybrid_fp16_mla_6x32::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args) { return GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
     [](const GemmArgs &args) { return new GemmHybridIndirect<cls_a64_hybrid_fp16_mla_6x32, __fp16, __fp16>(args); }
 ),
 GemmImplementation<__fp16, __fp16>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_hgemm_8x24",
-#ifndef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
     [](const GemmArgs &args) { return args._ci->has_fp16(); },
-#else
-    nullptr,
-#endif
-    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>::estimate_cycles(args, cls_a64_hgemm_8x24::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args) { return GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>::estimate_cycles<__fp16>(args); },
     [](const GemmArgs &args) { return new GemmInterleaved<cls_a64_hgemm_8x24, __fp16, __fp16>(args); }
 ),
-#endif // aarch64 && FP16
-#ifdef __aarch64__
 {
     GemmMethod::GEMM_INTERLEAVED,
     "a64_sgemm_8x12",
@@ -124,4 +113,4 @@ template std::vector<KernelDescription> get_compatible_kernels<__fp16, __fp16, N
 
 } // namespace arm_gemm
 
-#endif // __ARM_FP16_ARGS
+#endif // defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC))
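
The new with_estimate(...) entries above all follow the same shape: a method tag, a kernel name, an is-supported predicate, a cycle-estimate lambda, and a maker. At selection time the framework keeps only the supported entries and prefers the one with the lowest estimated cycle count. The sketch below illustrates that selection pattern in isolation; Args, Impl, pick and the toy cost models are simplified stand-ins invented for this example, not arm_gemm's actual GemmArgs/GemmImplementation types or its real heuristics.

    // Minimal, self-contained sketch (illustrative only -- not the real
    // arm_gemm types): pick the supported entry with the lowest estimate.
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <vector>

    struct Args {                 // stand-in for arm_gemm's GemmArgs
        bool     has_fp16;        // stand-in for args._ci->has_fp16()
        uint64_t M, N, K;         // problem dimensions
    };

    struct Impl {                 // stand-in for GemmImplementation<To, Tr>
        const char *name;
        std::function<bool(const Args &)>     is_supported;   // empty => always supported
        std::function<uint64_t(const Args &)> cycle_estimate; // empty => no estimate
    };

    // Scan the table, skip unsupported entries, and keep the cheapest estimate.
    // Entries with no estimate are treated as "infinitely expensive" fallbacks.
    const Impl *pick(const std::vector<Impl> &table, const Args &args) {
        const Impl *best = nullptr;
        uint64_t best_cycles = UINT64_MAX;
        for (const Impl &impl : table) {
            if (impl.is_supported && !impl.is_supported(args)) {
                continue;
            }
            const uint64_t cycles = impl.cycle_estimate ? impl.cycle_estimate(args) : UINT64_MAX;
            if (best == nullptr || cycles < best_cycles) {
                best = &impl;
                best_cycles = cycles;
            }
        }
        return best;
    }

    int main() {
        const std::vector<Impl> table = {
            {"hybrid_kernel",
             [](const Args &a) { return a.has_fp16; },
             // toy cost model: cheap on small/skinny problems (invented numbers)
             [](const Args &a) { return a.M * a.N * a.K / 8 + a.N * a.K; }},
            {"interleaved_kernel",
             [](const Args &a) { return a.has_fp16; },
             // toy cost model: better throughput but pays a rearrangement cost
             [](const Args &a) { return a.M * a.N * a.K / 12 + 4 * a.M * a.K; }},
            {"plain_fallback", nullptr, nullptr}, // no predicate, no estimate
        };
        const Args args{true, 64, 64, 64};
        std::printf("selected: %s\n", pick(table, args)->name);
        return 0;
    }

Entries without an estimate (like the plain a64_sgemm_8x12 fallback left in the table above) still participate, but under this scheme any supported entry that can produce an estimate wins over them.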