From a50f19346c5b79e2743f882ce0c691c07076f207 Mon Sep 17 00:00:00 2001
From: Pablo Marquez Tello
Date: Mon, 8 Mar 2021 17:27:05 +0000
Subject: Updated cpu detection

* Added the case in the cpu detection code for Klein cores
* Added has_sve() and set_sve() methods in CpuInfo
* Detection code checks for presence of SVE via HWCAP_SVE
* Updated the heuristic in sve kernels to check for the absence of Klein
* Resolves: COMPMID-4085

Change-Id: I0b8c72ff19dc5a3a81628d121a1afa836e724b4f
Signed-off-by: Pablo Marquez Tello
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5257
Reviewed-by: Georgios Pinitas
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp   | 12 ++++++------
 src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp   |  8 ++++----
 src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp   | 24 ++++++++++++------------
 src/core/NEON/kernels/arm_gemm/gemm_int8.cpp   | 16 ++++++++--------
 src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp  | 24 ++++++++++++------------
 src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 22 +++++++++++-----------
 src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp  | 16 ++++++++--------
 7 files changed, 61 insertions(+), 61 deletions(-)

(limited to 'src/core/NEON')

diff --git a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
index 96b9734221..d8134c4bb5 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_bf16.cpp
@@ -49,22 +49,22 @@ static const GemmImplementation gemm_bf16_methods[] =
 { // gemm_bf16_interleaved
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_bf16fp32_mmla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_bf16fp32_dot_6x4VL",
-    nullptr,
-    [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)); },
+    [](const GemmArgs &args) { return args._ci->has_sve(); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && ((args._Ksize <= 128) && (args._Nsize <= 128)); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 { // gemm_bf16_interleaved
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_bf16fp32_dot_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>2); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>2); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 # endif // SVE
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
index 93563a63d0..8e355c8f2c 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp
@@ -47,15 +47,15 @@ static const GemmImplementation<__fp16, __fp16> gemm_fp16_methods[] = {
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_fp16_mla_6x4VL",
-    nullptr,
-    [](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
+    [](const GemmArgs &args) { return args._ci->has_sve(); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8))); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_fp16_mla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize > 4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize > 4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
index b0e912d188..5c894c01c8 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp
@@ -62,8 +62,8 @@ static const GemmImplementation gemm_fp32_methods[] =
 {
     GemmMethod::GEMM_HYBRID,
     "sve_gemv_fp32_mla_8VL",
-    [](const GemmArgs &args) { return args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && args._Msize==1 && args._nbatches==1 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemvPretransposed(args); }
 },
 #endif
@@ -80,8 +80,8 @@ static const GemmImplementation gemm_fp32_methods[] =
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_fp32_mmla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif // __ARM_FEATURE_SVE && MMLA_FP32
@@ -91,22 +91,22 @@ static const GemmImplementation gemm_fp32_methods[] =
 {
     GemmMethod::GEMM_HYBRID,
     "sve_smallK_hybrid_fp32_mla_8x1VL",
-    [](const GemmArgs &args) { return args._Ksize <= 24 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize <= 24 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmHybrid(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_fp32_mla_8x1VL",
-    nullptr,
-    [](const GemmArgs &args) { return (args._Nsize < 12); },
+    [](const GemmArgs &args) { return args._ci->has_sve(); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (args._Nsize < 12); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_fp32_mla_6x4VL",
-    nullptr,
-    [](const GemmArgs &args) { return ((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
+    [](const GemmArgs &args) { return args._ci->has_sve(); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (((args._Ksize <= 256) && (args._Nsize <= 256)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8))); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 #endif // __ARM_FEATURE_SVE
@@ -144,8 +144,8 @@ GemmImplementation::with_estimate(
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_fp32_mla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif // __ARM_FEATURE_SVE
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
index f081558c40..60cf82f9c6 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_int8.cpp
@@ -51,30 +51,30 @@ static const GemmImplementation gemm_s8_methods[] = {
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_mmla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>8); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID,
     "sve_smallK_hybrid_s8s32_dot_8x1VL",
-    [](const GemmArgs &args) { return args._Ksize<=64 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmHybrid(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8s32_dot_6x4VL",
-    [](const GemmArgs &args) { return args._Ksize>=16; },
-    [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
+    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize>=16; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8))); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_dot_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif // SVE
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index d3a55eba6b..094b6fdff4 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -58,46 +58,46 @@ static const GemmImplementation gemm_qint8_methods
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_mmla_8x3VL",
-    [](const GemmArgs &args, const Requantize32 &) { return (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>8); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "sve_smallK_hybrid_s8s32_dot_8x1VL",
-    [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized(args, qp); }
 },
 #ifdef SVE2
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8qs_dot_6x4VL",
-    [](const GemmArgs &, const Requantize32 &qp) { return quant_hybrid_symmetric(qp); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve() && quant_hybrid_symmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8qa_dot_4x4VL",
-    [](const GemmArgs &, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8s32_dot_6x4VL",
-    nullptr,
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 },
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_dot_8x3VL",
-    [](const GemmArgs &args, const Requantize32 &) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 },
 #endif // SVE
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index 9720c7d06e..be27b3a117 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -55,39 +55,39 @@ static const GemmImplementation gemm_quint8_meth
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_mmla_8x3VL",
-    [](const GemmArgs &args, const Requantize32 &) { return (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>8); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "sve_smallK_hybrid_u8u32_dot_8x1VL",
-    [](const GemmArgs &args, const Requantize32 &) { return args._Ksize<=64 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized(args, qp); }
 },
 #ifdef SVE2 // Requantizing kernels include some SVE2 only instructions (SQRDMULH, SRSHL)
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8qa_dot_4x4VL",
-    [](const GemmArgs &, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8u32_dot_6x4VL",
-    nullptr,
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 },
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_dot_8x3VL",
-    [](const GemmArgs &args, const Requantize32 &) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 },
 #endif
@@ -96,7 +96,7 @@ static const GemmImplementation gemm_quint8_meth
     GemmMethod::GEMM_INTERLEAVED,
     "a64_interleaved_u8u32_mmla_8x12",
     [](const GemmArgs &args, const Requantize32 &) { return (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 },
 #endif
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
index d2ebe00f3b..fb41a9fc09 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp
@@ -51,30 +51,30 @@ static const GemmImplementation gemm_u8_methods[] = {
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_mmla_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>8); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif
 {
     GemmMethod::GEMM_HYBRID,
     "sve_smallK_hybrid_u8u32_dot_8x1VL",
-    [](const GemmArgs &args) { return args._Ksize<=64 && !args._indirect_input; },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmHybrid(args); }
 },
 {
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8u32_dot_6x4VL",
-    nullptr,
-    [](const GemmArgs &args) { return ((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8)); },
+    [](const GemmArgs &args) { return args._ci->has_sve(); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN && (((args._Ksize <= 128) && (args._Nsize <= 128)) || ((args._nmulti > 1) && ((args._Msize / args._maxthreads) < 8))); },
     [](const GemmArgs &args) { return new GemmHybridIndirect(args); }
 },
 {
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_dot_8x3VL",
-    [](const GemmArgs &args) { return (args._Ksize>4); },
-    nullptr,
+    [](const GemmArgs &args) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
     [](const GemmArgs &args) { return new GemmInterleaved(args); }
 },
 #endif
--
cgit v1.2.1
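
Note on the detection side, which is not shown in the diff above because the view is limited to src/core/NEON: the commit message describes CpuInfo gaining has_sve()/set_sve() and deriving the flag from HWCAP_SVE. Below is a minimal sketch of that mechanism on Linux/AArch64. SketchCpuInfo and detect_sve() are illustrative stand-ins, not the library's actual CpuInfo implementation; only the getauxval(AT_HWCAP) & HWCAP_SVE check reflects the mechanism named in the commit message.

// Sketch only: HWCAP-based SVE detection as described in the commit message.
// "SketchCpuInfo" is a hypothetical stand-in for the library's CpuInfo class.
#include <cstdint>
#if defined(__aarch64__) && defined(__linux__)
#include <sys/auxv.h>   // getauxval, AT_HWCAP
#include <asm/hwcap.h>  // HWCAP_SVE
#endif

class SketchCpuInfo {
public:
    bool has_sve() const { return _sve; }
    void set_sve(bool v)  { _sve = v; }

    // Populate the SVE flag from the kernel-reported hardware capabilities.
    void detect_sve() {
#if defined(__aarch64__) && defined(__linux__) && defined(HWCAP_SVE)
        set_sve((getauxval(AT_HWCAP) & HWCAP_SVE) != 0);
#else
        set_sve(false); // No HWCAP information available: assume no SVE.
#endif
    }

private:
    bool _sve = false;
};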
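
For the kernel tables themselves, each SVE entry in this patch now carries two predicates: the first is a hard capability/shape gate (now including args._ci->has_sve()), the second a scheduling heuristic (now requiring that the CPU is not CPUModel::KLEIN). The sketch below shows one plausible way such a two-predicate table can be walked; it is not arm_gemm's actual selection code, and KernelEntry, SelArgs, and pick_kernel are made-up names.

// Sketch of the two-predicate selection pattern visible in the tables above:
// prefer the first entry that is both supported and recommended, otherwise
// fall back to the first entry that is merely supported.
// All names here are illustrative, not arm_gemm's real types.
#include <functional>
#include <string>
#include <vector>

struct SelArgs {
    bool sve   = false;  // stands in for args._ci->has_sve()
    bool klein = false;  // stands in for args._ci->get_cpu_model() == CPUModel::KLEIN
    int  Ksize = 0;
};

struct KernelEntry {
    std::string name;
    std::function<bool(const SelArgs &)> supported;    // hard requirement (may be empty)
    std::function<bool(const SelArgs &)> recommended;  // heuristic (may be empty)
};

const KernelEntry *pick_kernel(const std::vector<KernelEntry> &table, const SelArgs &a) {
    const KernelEntry *fallback = nullptr;
    for (const auto &e : table) {
        const bool ok  = !e.supported   || e.supported(a);
        const bool rec = !e.recommended || e.recommended(a);
        if (ok && rec) return &e;            // first supported + recommended entry wins
        if (ok && !fallback) fallback = &e;  // remember a supported fallback
    }
    return fallback;
}

// Example entry mirroring "sve_interleaved_u8u32_dot_8x3VL" above:
//   supported:   [](const SelArgs &a) { return a.sve && a.Ksize > 4; }
//   recommended: [](const SelArgs &a) { return !a.klein; }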