Diffstat (limited to 'src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp')
-rw-r--r-- | src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp | 116 |
1 files changed, 79 insertions, 37 deletions
diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
index 985567f6f3..aa62815438 100644
--- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
+++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp
@@ -29,15 +29,21 @@
 #include "kernels/a64_gemm_s8_4x4.hpp"
 #include "kernels/a64_gemm_s8_8x12.hpp"
 #include "kernels/a64_hybrid_s8qa_dot_4x16.hpp"
+#include "kernels/a64_hybrid_s8qa_mmla_4x16.hpp"
 #include "kernels/a64_hybrid_s8qs_dot_6x16.hpp"
+#include "kernels/a64_hybrid_s8qs_mmla_6x16.hpp"
 #include "kernels/a64_hybrid_s8s32_dot_6x16.hpp"
+#include "kernels/a64_hybrid_s8s32_mmla_6x16.hpp"
 #include "kernels/a64_interleaved_s8s32_mmla_8x12.hpp"
 #include "kernels/a64_smallK_hybrid_s8s32_dot_6x4.hpp"
 #include "kernels/a64_smallK_hybrid_s8s32_dot_8x4.hpp"
-#include "kernels/sve_hybrid_s8s32_dot_6x4VL.hpp"
 #include "kernels/sve_hybrid_s8qa_dot_4x4VL.hpp"
+#include "kernels/sve_hybrid_s8qa_mmla_4x4VL.hpp"
 #include "kernels/sve_hybrid_s8qs_dot_6x4VL.hpp"
+#include "kernels/sve_hybrid_s8qs_mmla_6x4VL.hpp"
+#include "kernels/sve_hybrid_s8s32_dot_6x4VL.hpp"
+#include "kernels/sve_hybrid_s8s32_mmla_6x4VL.hpp"
 #include "kernels/sve_interleaved_s8s32_dot_8x3VL.hpp"
 #include "kernels/sve_interleaved_s8s32_mmla_8x3VL.hpp"
 #include "kernels/sve_smallK_hybrid_s8s32_dot_8x1VL.hpp"
@@ -54,62 +60,98 @@ namespace arm_gemm {
 static const GemmImplementation<int8_t, int8_t, Requantize32> gemm_qint8_methods[] = {
 #ifdef ARM_COMPUTE_ENABLE_SVE
-#ifdef ARM_COMPUTE_ENABLE_I8MM
-{
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "sve_hybrid_s8qa_mmla_4x4VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_asymmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_mmla_4x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "sve_hybrid_s8qs_mmla_6x4VL",
+    [](const GemmArgs &args, const Requantize32 &qp) { return quant_hybrid_symmetric(qp) && args._ci->has_sve2() && args._ci->has_svei8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_mmla_6x4VL, int8_t, int8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_mmla_8x3VL",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm() && (args._Ksize>8); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_mmla_8x3VL, int8_t, int8_t>(args, qp); }
-},
-#endif // ARM_COMPUTE_ENABLE_I8MM
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "sve_hybrid_s8s32_mmla_6x4VL",
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_svei8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_mmla_6x4VL, int8_t, int8_t, Requantize32, true>(args, qp); }
+),
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "sve_smallK_hybrid_s8s32_dot_8x1VL",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && args._Ksize<=64 && !args._indirect_input; },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    nullptr,
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridQuantized<cls_sve_smallK_hybrid_s8s32_dot_8x1VL, int8_t, int8_t>(args, qp); }
 },
-#ifdef ARM_COMPUTE_ENABLE_SVE2
-{
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8qs_dot_6x4VL",
     [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_symmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qs_dot_6x4VL, int8_t, int8_t, Requantize32>(args, qp); }
-},
-{
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8qa_dot_4x4VL",
-    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_sve2() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8qa_dot_4x4VL, int8_t, int8_t, Requantize32>(args, qp); }
-},
-#endif // ARM_COMPUTE_ENABLE_SVE2
-{
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "sve_hybrid_s8s32_dot_6x4VL",
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_sve_hybrid_s8s32_dot_6x4VL, int8_t, int8_t, Requantize32, true>(args, qp); }
-},
-{
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "sve_interleaved_s8s32_dot_8x3VL",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_sve() && (args._Ksize>4); },
-    [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() != CPUModel::KLEIN; },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_sve_interleaved_s8s32_dot_8x3VL, int8_t, int8_t>(args, qp); }
-},
-#endif // SVE
-#ifdef ARM_COMPUTE_ENABLE_I8MM
-{
+),
+#endif // ARM_COMPUTE_ENABLE_SVE
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "a64_hybrid_s8qa_mmla_4x16",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_asymmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_mmla_4x16, int8_t, int8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_HYBRID,
+    "a64_hybrid_s8qs_mmla_6x16",
+    [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_i8mm() && quant_hybrid_symmetric(qp); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_mmla_6x16, int8_t, int8_t, Requantize32>(args, qp); }
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_interleaved_s8s32_mmla_8x12",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm() && (args._Ksize>8); },
-    nullptr,
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_interleaved_s8s32_mmla_8x12, int8_t, int8_t>(args, qp); }
-},
-#endif // ARM_COMPUTE_ENABLE_I8MM
+),
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
+    GemmMethod::GEMM_INTERLEAVED,
+    "a64_hybrid_s8s32_mmla_6x16",
+    [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_i8mm(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
+    [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_mmla_6x16, int8_t, int8_t, Requantize32, true>(args, qp); }
+),
 {
     GemmMethod::GEMM_HYBRID_QUANTIZED,
     "a64_smallK_hybrid_s8s32_dot_8x4",
@@ -135,42 +177,42 @@ GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_s8qs_dot_6x16",
     [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_symmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, Requantize32>::estimate_cycles(args, cls_a64_hybrid_s8qs_dot_6x16::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qs_dot_6x16, int8_t, int8_t, Requantize32>(args, qp); }
 ),
 GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_s8qa_dot_4x16",
     [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, Requantize32>::estimate_cycles(args, cls_a64_hybrid_s8qa_dot_4x16::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, Requantize32>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8qa_dot_4x16, int8_t, int8_t, Requantize32>(args, qp); }
 ),
 GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_HYBRID,
     "a64_hybrid_s8s32_dot_6x16",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, Requantize32, true>::estimate_cycles(args, cls_a64_hybrid_s8s32_dot_6x16::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, Requantize32, true>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect<cls_a64_hybrid_s8s32_dot_6x16, int8_t, int8_t, Requantize32, true>(args, qp); }
 ),
 GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_gemm_s8_8x12",
     [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); },
-    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t>::estimate_cycles(args, cls_a64_gemm_s8_8x12::get_performance_parameters(args._ci)); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_8x12, int8_t, int8_t>(args, qp); }
 ),
-{
+GemmImplementation<int8_t, int8_t, Requantize32>::with_estimate(
     GemmMethod::GEMM_INTERLEAVED,
     "a64_gemm_s8_4x4",
     nullptr,
-    [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); },
+    [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t>::estimate_cycles<int8_t>(args); },
     [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized<cls_a64_gemm_s8_4x4, int8_t, int8_t>(args, qp); }
-},
+),
 {
     GemmMethod::QUANTIZE_WRAPPER,
     "quantized_wrapper",
     [](const GemmArgs &args, const Requantize32 &) { return !args._indirect_input; },
-    [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); },
+    [](const GemmArgs &, const Requantize32 &) { return false; },
     [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper<int8_t, int8_t, int32_t>(args, qp); }
 },
 {
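The recurring shape in this diff is a GemmImplementation entry built by with_estimate() from a method tag, a kernel name, a support predicate, a cycle-estimate lambda, and an instantiation lambda; the change replaces hard-coded CPU-model gates with per-kernel estimate_cycles<int8_t>(args) calls so the framework can compare candidates by estimated cost. The sketch below is an illustrative, self-contained analogue of that selection idea only, not ComputeLibrary's actual API: Args, Candidate, pick(), and the cost numbers are assumptions invented for the example.

// Illustrative sketch: a candidate table where each entry carries a support
// predicate and a cycle estimate, and the cheapest supported entry wins.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Args {
    unsigned int M, N, K;   // problem size
    bool has_dotprod;       // CPU supports SDOT
    bool has_i8mm;          // CPU supports the int8 matrix-multiply extension
};

struct Candidate {
    std::string name;
    std::function<bool(const Args &)> is_supported;        // empty: always usable
    std::function<uint64_t(const Args &)> cycle_estimate;  // rough cost model
};

// Return the supported candidate with the lowest estimated cycle count.
const Candidate *pick(const std::vector<Candidate> &table, const Args &args) {
    const Candidate *best = nullptr;
    uint64_t best_cycles  = 0;
    for (const auto &c : table) {
        if (c.is_supported && !c.is_supported(args)) {
            continue;  // kernel not usable on this CPU / for this problem
        }
        const uint64_t cycles = c.cycle_estimate(args);
        if (best == nullptr || cycles < best_cycles) {
            best        = &c;
            best_cycles = cycles;
        }
    }
    return best;
}

int main() {
    // Hypothetical cost models: the MMLA kernel retires roughly twice as many
    // int8 MACs per cycle as the dot-product kernel, but needs I8MM support.
    const std::vector<Candidate> table = {
        {"mmla_kernel",
         [](const Args &a) { return a.has_i8mm; },
         [](const Args &a) { return uint64_t(a.M) * a.N * a.K / 8; }},
        {"dot_kernel",
         [](const Args &a) { return a.has_dotprod; },
         [](const Args &a) { return uint64_t(a.M) * a.N * a.K / 4; }},
    };

    const Args args{128, 128, 128, /*has_dotprod=*/true, /*has_i8mm=*/true};
    if (const Candidate *c = pick(table, args)) {
        std::cout << "selected: " << c->name << "\n";  // prints "mmla_kernel"
    }
    return 0;
}

With a cost model per entry, support checks only have to say whether a kernel can run; preferring one kernel over another on a given CPU (e.g. MMLA over dot-product, or skipping a slow fallback) falls out of the estimates instead of extra hand-written gating, which is the direction this diff takes.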