From 4ee8b1599dbaf7634d25607fa5ac96ba3dc6b0f2 Mon Sep 17 00:00:00 2001
From: Georgios Pinitas
Date: Fri, 16 Jul 2021 16:16:43 +0100
Subject: Update GEMM assembly kernels

- Introduce Fp32 kernels with internal calculations in Bfloat16 when
  fast_mode is enabled
- Improve kernel selection heuristics

Signed-off-by: Georgios Pinitas
Change-Id: I68a9e7e862b6fd2721b46e0d7cc791091c4ab279
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5965
Tested-by: Arm Jenkins
Comments-Addressed: Arm Jenkins
---
 src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 94 ++++++++++++++++----------
 1 file changed, 60 insertions(+), 34 deletions(-)

(limited to 'src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp')

diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
index f3f2f335fd..abd2799583 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp
@@ -29,13 +29,17 @@
 #include "kernels/a64_gemm_u8_4x4.hpp"
 #include "kernels/a64_gemm_u8_8x12.hpp"
 #include "kernels/a64_hybrid_u8qa_dot_4x16.hpp"
+#include "kernels/a64_hybrid_u8qa_mmla_4x16.hpp"
 #include "kernels/a64_hybrid_u8u32_dot_6x16.hpp"
+#include "kernels/a64_hybrid_u8u32_mmla_6x16.hpp"
 #include "kernels/a64_interleaved_u8u32_mmla_8x12.hpp"
 #include "kernels/a64_smallK_hybrid_u8u32_dot_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_u8u32_dot_8x4.hpp"
-#include "kernels/sve_hybrid_u8u32_dot_6x4VL.hpp"
 #include "kernels/sve_hybrid_u8qa_dot_4x4VL.hpp"
+#include "kernels/sve_hybrid_u8qa_mmla_4x4VL.hpp"
+#include "kernels/sve_hybrid_u8u32_dot_6x4VL.hpp"
+#include "kernels/sve_hybrid_u8u32_mmla_6x4VL.hpp"
 #include "kernels/sve_interleaved_u8u32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_u8u32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_u8u32_dot_8x1VL.hpp"
 
@@ -51,55 +55,77 @@ namespace arm_gemm {
 static const GemmImplementation gemm_quint8_methods[] =
 {
 #ifdef ARM_COMPUTE_ENABLE_SVE
-#ifdef ARM_COMPUTE_ENABLE_I8MM
-{
+GemmImplementation::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "sve_hybrid_u8qa_mmla_4x4VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
+),
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_mmla_8x3VL",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm() && (args._Ksize>8); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
-},
-#endif
+),
+GemmImplementation::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "sve_hybrid_u8u32_mmla_6x4VL",
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
+),
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "sve_smallK_hybrid_u8u32_dot_8x1VL",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    nullptr,
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized(args, qp); }
 },
-#ifdef ARM_COMPUTE_ENABLE_SVE2 // Requantizing kernels include some SVE2 only instructions (SQRDMULH, SRSHL)
-{
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8qa_dot_4x4VL",
-    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
-},
-#endif // ARM_COMPUTE_ENABLE_SVE2
-{
+),
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_u8u32_dot_6x4VL",
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
-},
-{
+),
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_u8u32_dot_8x3VL",
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
-},
-#endif
-#ifdef ARM_COMPUTE_ENABLE_I8MM
-{
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+GemmImplementation::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "a64_hybrid_u8qa_mmla_4x16",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
+),
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_interleaved_u8u32_mmla_8x12",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm() && (args._Ksize>8); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
-},
-#endif
+),
+GemmImplementation::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "a64_hybrid_u8u32_mmla_6x16",
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
+),
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "a64_smallK_hybrid_u8u32_dot_8x4",
@@ -125,35 +151,35 @@ GemmImplementation::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_u8qa_dot_4x16",
     [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_u8qa_dot_4x16::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 ),
 GemmImplementation::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_u8u32_dot_6x16",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_u8u32_dot_6x16::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); }
 ),
 GemmImplementation::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_gemm_u8_8x12",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args, cls_a64_gemm_u8_8x12::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
 ),
-{
+GemmImplementation::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_gemm_u8_4x4",
     nullptr,
-    [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }
-},
+),
 {
     GemmMethod::QUANTIZE_WRAPPER,
     "quantized_wrapper",
     [](const GemmArgs &args, const Requantize32 &) { return !args._indirect_input; },
-    [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); },
+    [](const GemmArgs &, const Requantize32 &) { return false; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper(args, qp); }
 },
 {
-- 
cgit v1.2.1
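
Note on the selection change (an illustrative sketch, not part of the patch):
the diff above replaces per-CPU gating such as
"args._ci->get_cpu_model() != CPUModel::KLEIN" with estimate_cycles() hooks,
so each table entry now pairs a hard "is supported" predicate with a soft
cycle estimate, and the dispatcher can rank every eligible kernel instead of
hard-coding preferences. The sketch below models that table walk; Args, Entry
and pick_best are hypothetical stand-ins for arm_gemm's
GemmArgs/GemmImplementation machinery, not the library's actual API.

    #include <cstdint>
    #include <limits>
    #include <vector>

    // Hypothetical problem descriptor; the real GemmArgs also carries CPU
    // info, batching and indirect-input flags.
    struct Args {
        bool has_dotprod;
        bool has_i8mm;
        uint64_t M, N, K;
    };

    // One row of the method table: a hard eligibility predicate plus a soft
    // cost estimate used to rank the eligible kernels.
    struct Entry {
        const char *name;
        bool (*is_supported)(const Args &);
        uint64_t (*cycle_estimate)(const Args &);
    };

    // Return the eligible entry with the lowest cycle estimate. Entries
    // without an estimator are treated as maximum cost, i.e. last-resort
    // fallbacks (compare the quantized_wrapper entry above, whose
    // "recommended" hook now simply returns false).
    const Entry *pick_best(const std::vector<Entry> &table, const Args &args) {
        const Entry *best = nullptr;
        uint64_t best_cost = std::numeric_limits<uint64_t>::max();
        for (const Entry &e : table) {
            if (e.is_supported && !e.is_supported(args)) {
                continue; // fails a hard requirement (ISA feature, shape)
            }
            const uint64_t cost = e.cycle_estimate
                                      ? e.cycle_estimate(args)
                                      : std::numeric_limits<uint64_t>::max();
            if (best == nullptr || cost < best_cost) {
                best = &e;
                best_cost = cost;
            }
        }
        return best;
    }

The real dispatcher is richer than this (for instance, a caller-supplied
method or name filter can constrain the table walk), but the ranking idea is
what the removed KLEIN checks were converted into.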