From 33e03074c36d85de87e9032a2583b04ce8ddcd6b Mon Sep 17 00:00:00 2001 From: Georgios Pinitas Date: Thu, 14 Jan 2021 13:43:40 +0000 Subject: Cycle estimate-based kernel selection for dot product quantized s8/u8 kernels Resolves: COMPMID-3990 Signed-off-by: Georgios Pinitas Change-Id: If840c79209940535450f4ea1cbf6b0ec646a168e Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4866 Tested-by: Arm Jenkins Reviewed-by: Michele Di Giorgio Comments-Addressed: Arm Jenkins --- src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp | 1 - src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp | 1 - .../NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp | 27 +- .../NEON/kernels/arm_gemm/gemm_implementation.hpp | 14 + src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp | 28 +- src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp | 22 +- src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp | 1 - .../NEON/kernels/arm_gemm/interleave_indirect.cpp | 16 +- .../kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp | 13 +- .../kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp | 13 +- .../kernels/a64_hybrid_bf16fp32_dot_6x16.hpp | 14 +- .../a64_hybrid_bf16fp32_dot_6x16/generic.cpp | 1186 +++++++++----------- .../arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp | 19 +- .../kernels/a64_hybrid_fp16_mla_6x32/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp | 23 +- .../kernels/a64_hybrid_fp32_mla_6x16/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp | 14 +- .../kernels/a64_hybrid_fp32_mla_8x4/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp | 24 +- .../kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp | 24 +- .../kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp | 24 +- .../kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp | 4 +- .../arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp | 24 +- .../kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp | 4 +- 
.../arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp | 24 +- .../kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp | 4 +- .../kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp | 3 +- .../kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp | 14 +- .../sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp | 55 +- .../arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp | 14 +- .../kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp | 55 +- .../arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp | 14 +- .../kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp | 55 +- .../arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp | 14 +- .../kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp | 80 +- .../arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp | 14 +- .../kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp | 64 +- .../arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp | 14 +- .../kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp | 55 +- .../kernels/sve_hybrid_s8s32_dot_6x4VL.hpp | 14 +- .../kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp | 55 +- .../arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp | 14 +- .../kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp | 62 +- .../kernels/sve_hybrid_u8u32_dot_6x4VL.hpp | 14 +- .../kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp | 55 +- 47 files changed, 997 insertions(+), 1212 deletions(-) diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp index 75524fff97..93563a63d0 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_fp16.cpp @@ -32,7 +32,6 @@ #include "gemm_hybrid_indirect.hpp" #include "gemm_implementation.hpp" #include "gemm_interleaved.hpp" -#include "gemm_interleaved_pretransposed_2d.hpp" #include "kernels/a32_sgemm_8x6.hpp" #include "kernels/a64_hgemm_8x24.hpp" diff --git a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp index e9e335f500..c1e34d12f7 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_fp32.cpp @@ -27,7 
+27,6 @@ #include "gemm_hybrid_indirect.hpp" #include "gemm_implementation.hpp" #include "gemm_interleaved.hpp" -#include "gemm_interleaved_pretransposed_2d.hpp" #include "gemv_batched.hpp" #include "gemv_pretransposed.hpp" diff --git a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp index eede1a4f76..d9035c8917 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_hybrid_indirect.hpp @@ -573,7 +573,7 @@ public: } // Estimate cycles for given problem given provided parameters - static uint64_t estimate_cycles(const GemmArgs &args, const PerformanceParameters ¶ms) { + static uint64_t estimate_cycles(const GemmArgs &args, const PerformanceParameters ¶ms, const OutputStage &os = {} ) { // Note: Current hybrid kernels don't actually round up height (they // have paths for each possible height). Might need to make this // configurable in future. @@ -591,6 +591,31 @@ public: uint64_t total_cycles = mac_cycles; + // Quantizing kernels with separate quantize need to add in the extra stages. + if (std::is_same::value && SeparateQuantize) { + const Requantize32 *qp = reinterpret_cast(&os); + + // Row sums: need to consider each value in A (batch * multi * M * K)... + uint64_t rowsum_bytes = static_cast(args._nbatches) * args._nmulti * args._Msize * roundup(args._Ksize, strategy::k_unroll()); + + // ... but row sums are skipped if B offset==0. + if (qp->b_offset == 0) { + rowsum_bytes = 0; + } + + // Use "prepare bytes per cycle" to store "row sum values per cycle". + float rowsum_cycles = static_cast(rowsum_bytes) / params.prepare_bytes_cycle; + + // Requantize: need to consider each value in C (batch * multi * M * N) + uint64_t requantize_bytes = static_cast(args._nbatches) * args._nmulti * args._Msize * args._Nsize; + + // Use "merge bytes per cycle" to store "requantize values per cycle". 
+ float requantize_cycles = static_cast(requantize_bytes) / params.merge_bytes_cycle; + + // Recalculate total_cycles with the extra components. + total_cycles = mac_cycles + rowsum_cycles + requantize_cycles; + } + return total_cycles; } diff --git a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp index f6a0fc5d52..d3857a50e7 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_implementation.hpp @@ -61,9 +61,23 @@ struct GemmImplementation { return instantiate(args, os); } + static GemmImplementation with_estimate(GemmMethod m, const char *n, + std::function is_supported, std::function cycle_estimate, + std::function *(const GemmArgs &, const OutputStage &)> instantiate) { + GemmImplementation impl(m,n); + + impl.is_supported=is_supported; + impl.cycle_estimate=cycle_estimate; + impl.instantiate=instantiate; + + return impl; + } + GemmImplementation(const GemmImplementation &) = default; GemmImplementation & operator= (const GemmImplementation &) = default; + GemmImplementation(GemmMethod m, const char * n) : method(m), name(n) {} + GemmImplementation(GemmMethod m, const char *n, std::function is_supported, std::function is_recommended, std::function *(const GemmArgs &, const OutputStage &)> instantiate) : diff --git a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp index 8d9fee6da4..d3a55eba6b 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_qint8.cpp @@ -131,46 +131,46 @@ static const GemmImplementation gemm_qint8_methods [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() == CPUModel::A53 && ((args._Msize > 28) || ((args._Msize % 8) > 4)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); } }, -{ +GemmImplementation::with_estimate( GemmMethod::GEMM_HYBRID, 
"a64_hybrid_s8qs_dot_6x16", [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_symmetric(qp); }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_s8qs_dot_6x16::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); } -}, -{ +), +GemmImplementation::with_estimate( GemmMethod::GEMM_HYBRID, "a64_hybrid_s8qa_dot_4x16", [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_s8qa_dot_4x16::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); } -}, -{ +), +GemmImplementation::with_estimate( GemmMethod::GEMM_HYBRID, "a64_hybrid_s8s32_dot_6x16", [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_s8s32_dot_6x16::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); } -}, -{ +), +GemmImplementation::with_estimate( GemmMethod::GEMM_INTERLEAVED, "a64_gemm_s8_8x12", [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args, cls_a64_gemm_s8_8x12::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); } -}, +), { GemmMethod::GEMM_INTERLEAVED, "a64_gemm_s8_4x4", nullptr, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return 
!args._ci->has_dotprod(); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); } }, { GemmMethod::QUANTIZE_WRAPPER, "quantized_wrapper", [](const GemmArgs &args, const Requantize32 &) { return !args._indirect_input; }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); }, [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper(args, qp); } }, { diff --git a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp index eead592d1f..9720c7d06e 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_quint8.cpp @@ -121,39 +121,39 @@ static const GemmImplementation gemm_quint8_meth [](const GemmArgs &args, const Requantize32 &) { return args._ci->get_cpu_model() == CPUModel::A53 && args._Msize > 4; }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); }, }, -{ +GemmImplementation::with_estimate( GemmMethod::GEMM_HYBRID, "a64_hybrid_u8qa_dot_4x16", [](const GemmArgs &args, const Requantize32 &qp) { return args._ci->has_dotprod() && quant_hybrid_asymmetric(qp); }, - [](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; }, + [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, cls_a64_hybrid_u8qa_dot_4x16::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); } -}, -{ +), +GemmImplementation::with_estimate( GemmMethod::GEMM_HYBRID, "a64_hybrid_u8u32_dot_6x16", [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); }, - [](const GemmArgs &args, const Requantize32 &) { return args._Nsize<=256 && args._Ksize>128; }, + [](const GemmArgs &args, const Requantize32 &) { return GemmHybridIndirect::estimate_cycles(args, 
cls_a64_hybrid_u8u32_dot_6x16::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmHybridIndirect(args, qp); } -}, -{ +), +GemmImplementation::with_estimate( GemmMethod::GEMM_INTERLEAVED, "a64_gemm_u8_8x12", [](const GemmArgs &args, const Requantize32 &) { return args._ci->has_dotprod(); }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return GemmInterleavedQuantized::estimate_cycles(args, cls_a64_gemm_u8_8x12::get_performance_parameters(args._ci)); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); } -}, +), { GemmMethod::GEMM_INTERLEAVED, "a64_gemm_u8_4x4", nullptr, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); }, [](const GemmArgs &args, const Requantize32 &qp) { return new GemmInterleavedQuantized(args, qp); } }, { GemmMethod::QUANTIZE_WRAPPER, "quantized_wrapper", [](const GemmArgs &args, const Requantize32 &) { return !args._indirect_input; }, - nullptr, + [](const GemmArgs &args, const Requantize32 &) { return !args._ci->has_dotprod(); }, [](const GemmArgs &args, const Requantize32 &qp) { return new QuantizeWrapper(args, qp); } }, { diff --git a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp index 7d24ea68d6..d2ebe00f3b 100644 --- a/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp +++ b/src/core/NEON/kernels/arm_gemm/gemm_uint8.cpp @@ -27,7 +27,6 @@ #include "gemm_common.hpp" #include "gemm_implementation.hpp" #include "gemm_interleaved.hpp" -#include "gemm_interleaved_pretransposed_2d.hpp" #include "gemm_hybrid.hpp" #include "gemm_hybrid_indirect.hpp" diff --git a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp index 2b3e170a3b..02d9486cc6 100644 --- a/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp +++ b/src/core/NEON/kernels/arm_gemm/interleave_indirect.cpp @@ -314,27 
+314,25 @@ template void Interleave<6, 1, VLType::None>(float *, const bfloat16 *, size_t, /* AArch64 */ #ifdef __aarch64__ -/* FP64 */ -/* NEON/SVE implementation (height 8) */ -template void IndirectInterleave<8, 1, VLType::None>(double *, const double * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); -template void ConvolutionInterleave<8, 1, VLType::None>(double *, const double *, size_t, const convolver &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); -template void Interleave<8, 1, VLType::None>(double *, const double *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); - /* FP32 */ /* NEON/SVE implementation (height 8) */ template void IndirectInterleave<8, 1, VLType::None>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 1, VLType::None>(float *, const float *, size_t, const convolver &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 1, VLType::None>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#if defined(__ARM_FEATURE_SVE) && defined(MMLA_FP32) /* FMMLA */ template void IndirectInterleave<8, 2, VLType::None>(float *, const float * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 2, VLType::None>(float *, const float *, size_t, const convolver &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 2, VLType::None>(float *, const float *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#endif // SVE && MMLA_FP32 /* FP16 */ +#if defined(FP16_KERNELS) || 
defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) template void IndirectInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 1, VLType::None>(__fp16 *, const __fp16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#endif // FP16_KERNELS or __ARM_FEATURE_FP16_VECTOR_ARITHMETIC template void IndirectInterleave<8, 1, VLType::None>(float *, const __fp16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 1, VLType::None>(float *, const __fp16 *, size_t, const convolver<__fp16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); @@ -342,6 +340,7 @@ template void Interleave<8, 1, VLType::None>(float *, const __fp16 *, size_t, un /* BF16 */ /* NEON/SVE BFDOT */ +#ifdef V8P6_BF template void IndirectInterleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); @@ -349,6 +348,7 @@ template void Interleave<8, 2, VLType::None>(bfloat16 *, const bfloat16 *, size_ template void IndirectInterleave<8, 4, VLType::None>(bfloat16 *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, 
unsigned int, unsigned int, unsigned int, bool, int32_t); template void ConvolutionInterleave<8, 4, VLType::None>(bfloat16 *, const bfloat16 *, size_t, const convolver<bfloat16> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 4, VLType::None>(bfloat16 *, const bfloat16 *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#endif // V8P6_BF /* NEON/SVE using FP32 kernel */ template void IndirectInterleave<8, 1, VLType::None>(float *, const bfloat16 * const * const *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); @@ -375,10 +375,12 @@ template void IndirectInterleave<8, 4, VLType::None>(int8_t *, const int8_t * co template void ConvolutionInterleave<8, 4, VLType::None>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 4, VLType::None>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#ifdef MMLA_INT8 /* MMLA SMMLA (height 8, block 8) */ template void IndirectInterleave<8, 8, VLType::None>(int8_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t); template void ConvolutionInterleave<8, 8, VLType::None>(int8_t *, const int8_t *, size_t, const convolver<int8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 8, VLType::None>(int8_t *, const int8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#endif // MMLA_INT8 /* NEON SDOT (height 8, block 1) */ template void IndirectInterleave<8, 1, VLType::None>(int16_t *, const int8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t); @@ 
-395,10 +397,12 @@ template void IndirectInterleave<8, 4, VLType::None>(uint8_t *, const uint8_t * template void ConvolutionInterleave<8, 4, VLType::None>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 4, VLType::None>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#ifdef MMLA_INT8 /* MMLA SMMLA (height 8, block 8) */ template void IndirectInterleave<8, 8, VLType::None>(uint8_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t); template void ConvolutionInterleave<8, 8, VLType::None>(uint8_t *, const uint8_t *, size_t, const convolver<uint8_t> &, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); template void Interleave<8, 8, VLType::None>(uint8_t *, const uint8_t *, size_t, unsigned int, unsigned int, unsigned int, unsigned int, bool, int32_t); +#endif // MMLA_INT8 /* NEON 16-bit (height 8, block 1) */ template void IndirectInterleave<8, 1, VLType::None>(uint16_t *, const uint8_t * const * const *, unsigned int, unsigned int, unsigned int y0, unsigned int ymax, unsigned int k0, unsigned int kmax, bool, int32_t); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp index eee817e8e7..7c7b894b08 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_s8_8x12.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,6 +27,7 @@ #include "arm_gemm.hpp" +#include "../performance_parameters.hpp" #include "../std_transforms_fixed.hpp" namespace arm_gemm { @@ -60,6 +61,16 @@ public: StdTransformsFixed transforms = {}; StdTransformsFixed transforms_quantized = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 15.361, 0.9341, 0.1636 }; + + default: + return { 29.0698, 3.9793, 0.4003 }; + } + } + kern_type kernel = a64_gemm_s8_8x12; cls_a64_gemm_s8_8x12(const CPUInfo *ci) { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp index 256ba2e08c..00ed5d03bf 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_gemm_u8_8x12.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,6 +25,7 @@ #ifdef __aarch64__ +#include "../performance_parameters.hpp" #include "../std_transforms_fixed.hpp" namespace arm_gemm { @@ -68,6 +69,16 @@ public: StdTransformsFixed transforms = {}; StdTransformsFixed transforms_quantized = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 15.361, 0.9341, 0.1636 }; + + default: + return { 29.0698, 3.9793, 0.4003 }; + } + } + kern_type kernel = a64_gemm_u8_8x12; cls_a64_gemm_u8_8x12(const CPUInfo *ci) { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp index a76c9949de..d390108b11 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,12 +28,12 @@ #include "../bfloat.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const bfloat16 *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const bfloat16 *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp index c820da3ef1..85944e9f6a 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_bf16fp32_dot_6x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -96,13 +96,13 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "1:" // Row loop "cmp %x[M], #0x6\n" - "bge 186f\n" + "bge 181f\n" "cmp %x[M], #0x4\n" - "bgt 149f\n" - "beq 112f\n" + "bgt 145f\n" + "beq 109f\n" "cmp %x[M], #0x2\n" - "bgt 75f\n" - "beq 38f\n" + "bgt 73f\n" + "beq 37f\n" "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "mov x14, %x[bias]\n" @@ -291,7 +291,7 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f60f8ca // bfdot v10.4s, v6.8h, v0.h[3]\n" ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n" "21:" // Height 1: Multiply loop: Main loop skip - "cbz x11, 26f\n" + "cbz x11, 25f\n" "cmp x11, #0x2\n" "blt 23f\n" "22:" // Height 1: Multiply loop: Odd block loop @@ -308,16 +308,10 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" "cmp x11, #0x2\n" "bge 22b\n" - "cbz x11, 26f\n" + "cbz x11, 25f\n" "23:" // Height 1: Multiply loop: Skip odd blocks - "tbz x11, #1, 24f\n" - "ldr s0, [x10], #0x4\n" - "tbz x11, #0, 25f\n" - "ld1 { v0.h }[2], [x10]\n" - "b 25f\n" - "24:" // Height 1: Multiply loop: Ragged operand read: partial_1_0 "ldr h0, [x10, #0x0]\n" - "25:" // Height 1: Multiply loop: Ragged operand read: Done + "24:" // Height 1: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -327,13 +321,13 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f40f0ca // bfdot v10.4s, v6.8h, v0.h[0]\n" "add x15, x15, #0x40\n" ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" - "26:" // Height 1: Multiply loop: No odd multiplies + "25:" // Height 1: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" "bne 16b\n" "prfm pstl1keep, [x13, #0x0]\n" - "tbz %x[flags], #1, 27f\n" + "tbz %x[flags], #1, 26f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], 
%[offset_max]\n" @@ -346,81 +340,81 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmax v9.4s, v9.4s, v1.4s\n" "fmax v10.4s, v10.4s, v1.4s\n" "fmax v11.4s, v11.4s, v1.4s\n" - "27:" // Height 1: No activation + "26:" // Height 1: No activation "cmp x16, #0x10\n" - "bge 36f\n" - "tbz x16, #3, 31f\n" + "bge 35f\n" + "tbz x16, #3, 30f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" - "tbz x16, #2, 29f\n" + "tbz x16, #2, 28f\n" "st1 { v10.4s }, [x13], #0x10\n" - "tbz x16, #1, 28f\n" + "tbz x16, #1, 27f\n" "str d11, [x13], #0x8\n" - "tbz x16, #0, 35f\n" + "tbz x16, #0, 34f\n" "st1 { v11.s }[2], [x13]\n" - "b 35f\n" - "28:" // Height 1: Partial direct writeback: partial_1_12 - "tbz x16, #0, 35f\n" + "b 34f\n" + "27:" // Height 1: Partial direct writeback: partial_1_12 + "tbz x16, #0, 34f\n" "str s11, [x13, #0x0]\n" - "b 35f\n" - "29:" // Height 1: Partial direct writeback: partial_2_8 - "tbz x16, #1, 30f\n" + "b 34f\n" + "28:" // Height 1: Partial direct writeback: partial_2_8 + "tbz x16, #1, 29f\n" "str d10, [x13], #0x8\n" - "tbz x16, #0, 35f\n" + "tbz x16, #0, 34f\n" "st1 { v10.s }[2], [x13]\n" - "b 35f\n" - "30:" // Height 1: Partial direct writeback: partial_1_8 - "tbz x16, #0, 35f\n" + "b 34f\n" + "29:" // Height 1: Partial direct writeback: partial_1_8 + "tbz x16, #0, 34f\n" "str s10, [x13, #0x0]\n" - "b 35f\n" - "31:" // Height 1: Partial direct writeback: partial_4_0 - "tbz x16, #2, 33f\n" + "b 34f\n" + "30:" // Height 1: Partial direct writeback: partial_4_0 + "tbz x16, #2, 32f\n" "st1 { v8.4s }, [x13], #0x10\n" - "tbz x16, #1, 32f\n" + "tbz x16, #1, 31f\n" "str d9, [x13], #0x8\n" - "tbz x16, #0, 35f\n" + "tbz x16, #0, 34f\n" "st1 { v9.s }[2], [x13]\n" - "b 35f\n" - "32:" // Height 1: Partial direct writeback: partial_1_4 - "tbz x16, #0, 35f\n" + "b 34f\n" + "31:" // Height 1: Partial direct writeback: partial_1_4 + "tbz x16, #0, 34f\n" "str s9, [x13, #0x0]\n" - "b 35f\n" - "33:" // Height 1: Partial direct writeback: partial_2_0 - "tbz x16, #1, 
34f\n" + "b 34f\n" + "32:" // Height 1: Partial direct writeback: partial_2_0 + "tbz x16, #1, 33f\n" "str d8, [x13], #0x8\n" - "tbz x16, #0, 35f\n" + "tbz x16, #0, 34f\n" "st1 { v8.s }[2], [x13]\n" - "b 35f\n" - "34:" // Height 1: Partial direct writeback: partial_1_0 + "b 34f\n" + "33:" // Height 1: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" - "35:" // Height 1: Partial direct writeback: Done - "b 37f\n" - "36:" // Height 1: Full writeback + "34:" // Height 1: Partial direct writeback: Done + "b 36f\n" + "35:" // Height 1: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" "str q11, [x13, #0x30]\n" "add x13, x13, #0x40\n" - "37:" // Height 1: Writeback done + "36:" // Height 1: Writeback done "subs x16, x16, #0x10\n" "bgt 3b\n" - "b 224f\n" - "38:" // Height 2 + "b 218f\n" + "37:" // Height 2 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" - "tbz %x[flags], #2, 39f\n" + "tbz %x[flags], #2, 38f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #2\n" "ldr x9, [%x[output_ptr], #0x8]\n" "add x9, x9, x19, LSL #2\n" - "b 40f\n" - "39:" // Height 2: setup direct output + "b 39f\n" + "38:" // Height 2: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #2\n" - "40:" // Height 2: Column loop - "cbz x14, 41f\n" + "39:" // Height 2: Column loop + "cbz x14, 40f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" @@ -430,84 +424,84 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "mov v14.16b, v10.16b\n" "add x14, x14, #0x40\n" "mov v15.16b, v11.16b\n" - "b 52f\n" - "41:" // Height 2: no bias - "tbz %x[flags], #0, 51f\n" + "b 51f\n" + "40:" // Height 2: no bias + "tbz %x[flags], #0, 50f\n" "cmp x16, #0x10\n" - "bge 50f\n" - "tbz x16, #3, 45f\n" + "bge 49f\n" + "tbz x16, #3, 44f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v9.4s 
}, [x13], #0x10\n" "ld1 { v13.4s }, [x9], #0x10\n" - "tbz x16, #2, 43f\n" + "tbz x16, #2, 42f\n" "ld1 { v10.4s }, [x13], #0x10\n" "ld1 { v14.4s }, [x9], #0x10\n" - "tbz x16, #1, 42f\n" + "tbz x16, #1, 41f\n" "mov x19, #0x38\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ld1 { v11.s }[2], [x13]\n" "ld1 { v15.s }[2], [x9]\n" - "b 49f\n" - "42:" // Height 2: Partial accumulate: partial_1_12 + "b 48f\n" + "41:" // Height 2: Partial accumulate: partial_1_12 "mov x19, #0x30\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ldr s11, [x13, #0x0]\n" "ldr s15, [x9, #0x0]\n" - "b 49f\n" - "43:" // Height 2: Partial accumulate: partial_2_8 - "tbz x16, #1, 44f\n" + "b 48f\n" + "42:" // Height 2: Partial accumulate: partial_2_8 + "tbz x16, #1, 43f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "mov x19, #0x28\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ld1 { v10.s }[2], [x13]\n" "ld1 { v14.s }[2], [x9]\n" - "b 49f\n" - "44:" // Height 2: Partial accumulate: partial_1_8 + "b 48f\n" + "43:" // Height 2: Partial accumulate: partial_1_8 "mov x19, #0x20\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ldr s10, [x13, #0x0]\n" "ldr s14, [x9, #0x0]\n" - "b 49f\n" - "45:" // Height 2: Partial accumulate: partial_4_0 - "tbz x16, #2, 47f\n" + "b 48f\n" + "44:" // Height 2: Partial accumulate: partial_4_0 + "tbz x16, #2, 46f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" - "tbz x16, #1, 46f\n" + "tbz x16, #1, 45f\n" "mov x19, #0x18\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ld1 { v9.s }[2], [x13]\n" "ld1 { v13.s }[2], [x9]\n" - "b 49f\n" - "46:" // Height 2: Partial accumulate: partial_1_4 + "b 48f\n" + "45:" // Height 2: Partial accumulate: partial_1_4 "mov x19, #0x10\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ldr s9, [x13, #0x0]\n" "ldr s13, [x9, #0x0]\n" - "b 49f\n" - "47:" // Height 2: Partial accumulate: partial_2_0 - "tbz x16, #1, 48f\n" + 
"b 48f\n" + "46:" // Height 2: Partial accumulate: partial_2_0 + "tbz x16, #1, 47f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "mov x19, #0x8\n" - "tbz x16, #0, 49f\n" + "tbz x16, #0, 48f\n" "ld1 { v8.s }[2], [x13]\n" "ld1 { v12.s }[2], [x9]\n" - "b 49f\n" - "48:" // Height 2: Partial accumulate: partial_1_0 + "b 48f\n" + "47:" // Height 2: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr s8, [x13, #0x0]\n" "ldr s12, [x9, #0x0]\n" - "49:" // Height 2: Partial accumulate: Done + "48:" // Height 2: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" - "b 52f\n" - "50:" // Height 2: full accumulate + "b 51f\n" + "49:" // Height 2: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" @@ -516,8 +510,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr q13, [x9, #0x10]\n" "ldr q14, [x9, #0x20]\n" "ldr q15, [x9, #0x30]\n" - "b 52f\n" - "51:" // Height 2: no accumulate + "b 51f\n" + "50:" // Height 2: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" @@ -526,31 +520,31 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "movi v13.16b, #0x0\n" "movi v14.16b, #0x0\n" "movi v15.16b, #0x0\n" - "52:" // Height 2: setup done + "51:" // Height 2: setup done "mov x12, #0x0\n" - "53:" // Height 2: String loop + "52:" // Height 2: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" - "tbz %x[flags], #3, 54f\n" + "tbz %x[flags], #3, 53f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" - "cbnz x12, 55f\n" + "cbnz x12, 54f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" - "b 55f\n" - "54:" // Height 2: setup direct input + "b 54f\n" + "53:" // Height 2: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" - "55:" // Height 2: input 
setup done + "54:" // Height 2: input setup done "cmp x11, #0x8\n" - "blt 58f\n" - "cmp x11, #0x10\n" "blt 57f\n" - "56:" // Height 2: Multiply loop: Main loop head + "cmp x11, #0x10\n" + "blt 56f\n" + "55:" // Height 2: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q6, [x15, #0x0]\n" @@ -608,8 +602,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f61f8ce // bfdot v14.4s, v6.8h, v1.h[3]\n" ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n" ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" - "bge 56b\n" - "57:" // Height 2: Multiply loop: Single iteration only + "bge 55b\n" + "56:" // Height 2: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" @@ -666,11 +660,11 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f61f8ce // bfdot v14.4s, v6.8h, v1.h[3]\n" ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n" ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" - "58:" // Height 2: Multiply loop: Main loop skip - "cbz x11, 63f\n" + "57:" // Height 2: Multiply loop: Main loop skip + "cbz x11, 61f\n" "cmp x11, #0x2\n" - "blt 60f\n" - "59:" // Height 2: Multiply loop: Odd block loop + "blt 59f\n" + "58:" // Height 2: Multiply loop: Odd block loop "ldr s0, [x10], #0x4\n" "ldr s1, [x28], #0x4\n" "ldr q6, [x15, #0x0]\n" @@ -688,20 +682,12 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n" ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" - "bge 59b\n" - "cbz x11, 63f\n" - "60:" // Height 2: Multiply loop: Skip odd blocks - "tbz x11, #1, 61f\n" - "ldr s0, [x10], #0x4\n" - "ldr s1, [x28], #0x4\n" - "tbz x11, #0, 62f\n" - "ld1 { v0.h }[2], [x10]\n" - "ld1 { v1.h }[2], [x28]\n" - "b 62f\n" - "61:" // Height 2: Multiply loop: Ragged operand read: partial_1_0 + "bge 58b\n" + "cbz x11, 61f\n" + "59:" // Height 2: Multiply loop: Skip odd blocks "ldr h0, [x10, #0x0]\n" "ldr h1, [x28, 
#0x0]\n" - "62:" // Height 2: Multiply loop: Ragged operand read: Done + "60:" // Height 2: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -715,14 +701,14 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f41f0ce // bfdot v14.4s, v6.8h, v1.h[0]\n" ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" - "63:" // Height 2: Multiply loop: No odd multiplies + "61:" // Height 2: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" - "bne 53b\n" + "bne 52b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" - "tbz %x[flags], #1, 64f\n" + "tbz %x[flags], #1, 62f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" @@ -743,72 +729,72 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmax v14.4s, v14.4s, v1.4s\n" "fmin v15.4s, v15.4s, v0.4s\n" "fmax v15.4s, v15.4s, v1.4s\n" - "64:" // Height 2: No activation + "62:" // Height 2: No activation "cmp x16, #0x10\n" - "bge 73f\n" - "tbz x16, #3, 68f\n" + "bge 71f\n" + "tbz x16, #3, 66f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v13.4s }, [x9], #0x10\n" - "tbz x16, #2, 66f\n" + "tbz x16, #2, 64f\n" "st1 { v10.4s }, [x13], #0x10\n" "st1 { v14.4s }, [x9], #0x10\n" - "tbz x16, #1, 65f\n" + "tbz x16, #1, 63f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" - "tbz x16, #0, 72f\n" + "tbz x16, #0, 70f\n" "st1 { v11.s }[2], [x13]\n" "st1 { v15.s }[2], [x9]\n" - "b 72f\n" - "65:" // Height 2: Partial direct writeback: partial_1_12 - "tbz x16, #0, 72f\n" + "b 70f\n" + "63:" // Height 2: Partial direct writeback: partial_1_12 + "tbz x16, #0, 70f\n" "str s11, [x13, #0x0]\n" "str s15, [x9, #0x0]\n" - "b 72f\n" - "66:" // Height 2: Partial direct writeback: partial_2_8 - "tbz x16, #1, 67f\n" + "b 
70f\n" + "64:" // Height 2: Partial direct writeback: partial_2_8 + "tbz x16, #1, 65f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" - "tbz x16, #0, 72f\n" + "tbz x16, #0, 70f\n" "st1 { v10.s }[2], [x13]\n" "st1 { v14.s }[2], [x9]\n" - "b 72f\n" - "67:" // Height 2: Partial direct writeback: partial_1_8 - "tbz x16, #0, 72f\n" + "b 70f\n" + "65:" // Height 2: Partial direct writeback: partial_1_8 + "tbz x16, #0, 70f\n" "str s10, [x13, #0x0]\n" "str s14, [x9, #0x0]\n" - "b 72f\n" - "68:" // Height 2: Partial direct writeback: partial_4_0 - "tbz x16, #2, 70f\n" + "b 70f\n" + "66:" // Height 2: Partial direct writeback: partial_4_0 + "tbz x16, #2, 68f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" - "tbz x16, #1, 69f\n" + "tbz x16, #1, 67f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" - "tbz x16, #0, 72f\n" + "tbz x16, #0, 70f\n" "st1 { v9.s }[2], [x13]\n" "st1 { v13.s }[2], [x9]\n" - "b 72f\n" - "69:" // Height 2: Partial direct writeback: partial_1_4 - "tbz x16, #0, 72f\n" + "b 70f\n" + "67:" // Height 2: Partial direct writeback: partial_1_4 + "tbz x16, #0, 70f\n" "str s9, [x13, #0x0]\n" "str s13, [x9, #0x0]\n" - "b 72f\n" - "70:" // Height 2: Partial direct writeback: partial_2_0 - "tbz x16, #1, 71f\n" + "b 70f\n" + "68:" // Height 2: Partial direct writeback: partial_2_0 + "tbz x16, #1, 69f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" - "tbz x16, #0, 72f\n" + "tbz x16, #0, 70f\n" "st1 { v8.s }[2], [x13]\n" "st1 { v12.s }[2], [x9]\n" - "b 72f\n" - "71:" // Height 2: Partial direct writeback: partial_1_0 + "b 70f\n" + "69:" // Height 2: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" "str s12, [x9, #0x0]\n" - "72:" // Height 2: Partial direct writeback: Done - "b 74f\n" - "73:" // Height 2: Full writeback + "70:" // Height 2: Partial direct writeback: Done + "b 72f\n" + "71:" // Height 2: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" @@ -819,29 +805,29 @@ void 
a64_hybrid_bf16fp32_dot_6x16 ( "str q15, [x9, #0x30]\n" "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" - "74:" // Height 2: Writeback done + "72:" // Height 2: Writeback done "subs x16, x16, #0x10\n" - "bgt 40b\n" - "b 224f\n" - "75:" // Height 3 + "bgt 39b\n" + "b 218f\n" + "73:" // Height 3 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" - "tbz %x[flags], #2, 76f\n" + "tbz %x[flags], #2, 74f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #2\n" "ldr x9, [%x[output_ptr], #0x8]\n" "ldr x27, [%x[output_ptr], #0x10]\n" "add x9, x9, x19, LSL #2\n" "add x27, x27, x19, LSL #2\n" - "b 77f\n" - "76:" // Height 3: setup direct output + "b 75f\n" + "74:" // Height 3: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #2\n" "add x27, x9, x19, LSL #2\n" - "77:" // Height 3: Column loop - "cbz x14, 78f\n" + "75:" // Height 3: Column loop + "cbz x14, 76f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" @@ -855,101 +841,101 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "mov v15.16b, v11.16b\n" "mov v18.16b, v10.16b\n" "mov v19.16b, v11.16b\n" - "b 89f\n" - "78:" // Height 3: no bias - "tbz %x[flags], #0, 88f\n" + "b 87f\n" + "76:" // Height 3: no bias + "tbz %x[flags], #0, 86f\n" "cmp x16, #0x10\n" - "bge 87f\n" - "tbz x16, #3, 82f\n" + "bge 85f\n" + "tbz x16, #3, 80f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" "ld1 { v9.4s }, [x13], #0x10\n" "ld1 { v13.4s }, [x9], #0x10\n" "ld1 { v17.4s }, [x27], #0x10\n" - "tbz x16, #2, 80f\n" + "tbz x16, #2, 78f\n" "ld1 { v10.4s }, [x13], #0x10\n" "ld1 { v14.4s }, [x9], #0x10\n" "ld1 { v18.4s }, [x27], #0x10\n" - "tbz x16, #1, 79f\n" + "tbz x16, #1, 77f\n" "mov x19, #0x38\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ld1 { v11.s }[2], 
[x13]\n" "ld1 { v15.s }[2], [x9]\n" "ld1 { v19.s }[2], [x27]\n" - "b 86f\n" - "79:" // Height 3: Partial accumulate: partial_1_12 + "b 84f\n" + "77:" // Height 3: Partial accumulate: partial_1_12 "mov x19, #0x30\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ldr s11, [x13, #0x0]\n" "ldr s15, [x9, #0x0]\n" "ldr s19, [x27, #0x0]\n" - "b 86f\n" - "80:" // Height 3: Partial accumulate: partial_2_8 - "tbz x16, #1, 81f\n" + "b 84f\n" + "78:" // Height 3: Partial accumulate: partial_2_8 + "tbz x16, #1, 79f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "mov x19, #0x28\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ld1 { v10.s }[2], [x13]\n" "ld1 { v14.s }[2], [x9]\n" "ld1 { v18.s }[2], [x27]\n" - "b 86f\n" - "81:" // Height 3: Partial accumulate: partial_1_8 + "b 84f\n" + "79:" // Height 3: Partial accumulate: partial_1_8 "mov x19, #0x20\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ldr s10, [x13, #0x0]\n" "ldr s14, [x9, #0x0]\n" "ldr s18, [x27, #0x0]\n" - "b 86f\n" - "82:" // Height 3: Partial accumulate: partial_4_0 - "tbz x16, #2, 84f\n" + "b 84f\n" + "80:" // Height 3: Partial accumulate: partial_4_0 + "tbz x16, #2, 82f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" - "tbz x16, #1, 83f\n" + "tbz x16, #1, 81f\n" "mov x19, #0x18\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ld1 { v9.s }[2], [x13]\n" "ld1 { v13.s }[2], [x9]\n" "ld1 { v17.s }[2], [x27]\n" - "b 86f\n" - "83:" // Height 3: Partial accumulate: partial_1_4 + "b 84f\n" + "81:" // Height 3: Partial accumulate: partial_1_4 "mov x19, #0x10\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ldr s9, [x13, #0x0]\n" "ldr s13, [x9, #0x0]\n" "ldr s17, [x27, #0x0]\n" - "b 86f\n" - "84:" // Height 3: Partial accumulate: partial_2_0 - "tbz x16, #1, 85f\n" + "b 84f\n" + "82:" // Height 3: Partial accumulate: partial_2_0 + "tbz x16, #1, 83f\n" "ldr d8, 
[x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "mov x19, #0x8\n" - "tbz x16, #0, 86f\n" + "tbz x16, #0, 84f\n" "ld1 { v8.s }[2], [x13]\n" "ld1 { v12.s }[2], [x9]\n" "ld1 { v16.s }[2], [x27]\n" - "b 86f\n" - "85:" // Height 3: Partial accumulate: partial_1_0 + "b 84f\n" + "83:" // Height 3: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr s8, [x13, #0x0]\n" "ldr s12, [x9, #0x0]\n" "ldr s16, [x27, #0x0]\n" - "86:" // Height 3: Partial accumulate: Done + "84:" // Height 3: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" - "b 89f\n" - "87:" // Height 3: full accumulate + "b 87f\n" + "85:" // Height 3: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" @@ -962,8 +948,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr q17, [x27, #0x10]\n" "ldr q18, [x27, #0x20]\n" "ldr q19, [x27, #0x30]\n" - "b 89f\n" - "88:" // Height 3: no accumulate + "b 87f\n" + "86:" // Height 3: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" @@ -976,34 +962,34 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "movi v17.16b, #0x0\n" "movi v18.16b, #0x0\n" "movi v19.16b, #0x0\n" - "89:" // Height 3: setup done + "87:" // Height 3: setup done "mov x12, #0x0\n" - "90:" // Height 3: String loop + "88:" // Height 3: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" - "tbz %x[flags], #3, 91f\n" + "tbz %x[flags], #3, 89f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" - "cbnz x12, 92f\n" + "cbnz x12, 90f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" - "b 92f\n" - "91:" // Height 3: setup direct input + "b 90f\n" + "89:" // Height 3: setup direct input "mov x10, 
%x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" - "92:" // Height 3: input setup done + "90:" // Height 3: input setup done "cmp x11, #0x8\n" - "blt 95f\n" + "blt 93f\n" "cmp x11, #0x10\n" - "blt 94f\n" - "93:" // Height 3: Multiply loop: Main loop head + "blt 92f\n" + "91:" // Height 3: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" @@ -1080,8 +1066,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n" ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" ".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n" - "bge 93b\n" - "94:" // Height 3: Multiply loop: Single iteration only + "bge 91b\n" + "92:" // Height 3: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" @@ -1157,11 +1143,11 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f60f8eb // bfdot v11.4s, v7.8h, v0.h[3]\n" ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" ".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n" - "95:" // Height 3: Multiply loop: Main loop skip - "cbz x11, 100f\n" + "93:" // Height 3: Multiply loop: Main loop skip + "cbz x11, 97f\n" "cmp x11, #0x2\n" - "blt 97f\n" - "96:" // Height 3: Multiply loop: Odd block loop + "blt 95f\n" + "94:" // Height 3: Multiply loop: Odd block loop "ldr s0, [x10], #0x4\n" "ldr s1, [x28], #0x4\n" "ldr s2, [x26], #0x4\n" @@ -1184,23 +1170,13 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" - "bge 96b\n" - "cbz x11, 100f\n" - "97:" // Height 3: Multiply loop: Skip odd blocks - "tbz x11, #1, 98f\n" - "ldr s0, [x10], #0x4\n" - "ldr s1, [x28], #0x4\n" - "ldr s2, [x26], #0x4\n" - "tbz x11, #0, 99f\n" - "ld1 { v0.h }[2], [x10]\n" - "ld1 { v1.h }[2], [x28]\n" - "ld1 { v2.h }[2], [x26]\n" - "b 99f\n" - "98:" // Height 3: Multiply loop: 
Ragged operand read: partial_1_0 + "bge 94b\n" + "cbz x11, 97f\n" + "95:" // Height 3: Multiply loop: Skip odd blocks "ldr h0, [x10, #0x0]\n" "ldr h1, [x28, #0x0]\n" "ldr h2, [x26, #0x0]\n" - "99:" // Height 3: Multiply loop: Ragged operand read: Done + "96:" // Height 3: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -1218,15 +1194,15 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f40f0eb // bfdot v11.4s, v7.8h, v0.h[0]\n" ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" - "100:" // Height 3: Multiply loop: No odd multiplies + "97:" // Height 3: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" - "bne 90b\n" + "bne 88b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" - "tbz %x[flags], #1, 101f\n" + "tbz %x[flags], #1, 98f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" @@ -1255,88 +1231,88 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmin v19.4s, v19.4s, v0.4s\n" "fmax v18.4s, v18.4s, v1.4s\n" "fmax v19.4s, v19.4s, v1.4s\n" - "101:" // Height 3: No activation + "98:" // Height 3: No activation "cmp x16, #0x10\n" - "bge 110f\n" - "tbz x16, #3, 105f\n" + "bge 107f\n" + "tbz x16, #3, 102f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v13.4s }, [x9], #0x10\n" "st1 { v16.4s }, [x27], #0x10\n" "st1 { v17.4s }, [x27], #0x10\n" - "tbz x16, #2, 103f\n" + "tbz x16, #2, 100f\n" "st1 { v10.4s }, [x13], #0x10\n" "st1 { v14.4s }, [x9], #0x10\n" "st1 { v18.4s }, [x27], #0x10\n" - "tbz x16, #1, 102f\n" + "tbz x16, #1, 99f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" - "tbz x16, #0, 109f\n" + "tbz x16, #0, 106f\n" "st1 { v11.s }[2], [x13]\n" "st1 { v15.s 
}[2], [x9]\n" "st1 { v19.s }[2], [x27]\n" - "b 109f\n" - "102:" // Height 3: Partial direct writeback: partial_1_12 - "tbz x16, #0, 109f\n" + "b 106f\n" + "99:" // Height 3: Partial direct writeback: partial_1_12 + "tbz x16, #0, 106f\n" "str s11, [x13, #0x0]\n" "str s15, [x9, #0x0]\n" "str s19, [x27, #0x0]\n" - "b 109f\n" - "103:" // Height 3: Partial direct writeback: partial_2_8 - "tbz x16, #1, 104f\n" + "b 106f\n" + "100:" // Height 3: Partial direct writeback: partial_2_8 + "tbz x16, #1, 101f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" - "tbz x16, #0, 109f\n" + "tbz x16, #0, 106f\n" "st1 { v10.s }[2], [x13]\n" "st1 { v14.s }[2], [x9]\n" "st1 { v18.s }[2], [x27]\n" - "b 109f\n" - "104:" // Height 3: Partial direct writeback: partial_1_8 - "tbz x16, #0, 109f\n" + "b 106f\n" + "101:" // Height 3: Partial direct writeback: partial_1_8 + "tbz x16, #0, 106f\n" "str s10, [x13, #0x0]\n" "str s14, [x9, #0x0]\n" "str s18, [x27, #0x0]\n" - "b 109f\n" - "105:" // Height 3: Partial direct writeback: partial_4_0 - "tbz x16, #2, 107f\n" + "b 106f\n" + "102:" // Height 3: Partial direct writeback: partial_4_0 + "tbz x16, #2, 104f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v16.4s }, [x27], #0x10\n" - "tbz x16, #1, 106f\n" + "tbz x16, #1, 103f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" - "tbz x16, #0, 109f\n" + "tbz x16, #0, 106f\n" "st1 { v9.s }[2], [x13]\n" "st1 { v13.s }[2], [x9]\n" "st1 { v17.s }[2], [x27]\n" - "b 109f\n" - "106:" // Height 3: Partial direct writeback: partial_1_4 - "tbz x16, #0, 109f\n" + "b 106f\n" + "103:" // Height 3: Partial direct writeback: partial_1_4 + "tbz x16, #0, 106f\n" "str s9, [x13, #0x0]\n" "str s13, [x9, #0x0]\n" "str s17, [x27, #0x0]\n" - "b 109f\n" - "107:" // Height 3: Partial direct writeback: partial_2_0 - "tbz x16, #1, 108f\n" + "b 106f\n" + "104:" // Height 3: Partial direct writeback: partial_2_0 + "tbz x16, #1, 105f\n" "str d8, [x13], 
#0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" - "tbz x16, #0, 109f\n" + "tbz x16, #0, 106f\n" "st1 { v8.s }[2], [x13]\n" "st1 { v12.s }[2], [x9]\n" "st1 { v16.s }[2], [x27]\n" - "b 109f\n" - "108:" // Height 3: Partial direct writeback: partial_1_0 + "b 106f\n" + "105:" // Height 3: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" "str s12, [x9, #0x0]\n" "str s16, [x27, #0x0]\n" - "109:" // Height 3: Partial direct writeback: Done - "b 111f\n" - "110:" // Height 3: Full writeback + "106:" // Height 3: Partial direct writeback: Done + "b 108f\n" + "107:" // Height 3: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" @@ -1352,16 +1328,16 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x13, x13, #0x40\n" "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" - "111:" // Height 3: Writeback done + "108:" // Height 3: Writeback done "subs x16, x16, #0x10\n" - "bgt 77b\n" - "b 224f\n" - "112:" // Height 4 + "bgt 75b\n" + "b 218f\n" + "109:" // Height 4 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" - "tbz %x[flags], #2, 113f\n" + "tbz %x[flags], #2, 110f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #2\n" "ldr x9, [%x[output_ptr], #0x8]\n" @@ -1370,14 +1346,14 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr x25, [%x[output_ptr], #0x18]\n" "add x27, x27, x19, LSL #2\n" "add x25, x25, x19, LSL #2\n" - "b 114f\n" - "113:" // Height 4: setup direct output + "b 111f\n" + "110:" // Height 4: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #2\n" "add x27, x9, x19, LSL #2\n" "add x25, x27, x19, LSL #2\n" - "114:" // Height 4: Column loop - "cbz x14, 115f\n" + "111:" // Height 4: Column loop + "cbz x14, 112f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" @@ -1395,12 +1371,12 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "mov v21.16b, v9.16b\n" "mov 
v22.16b, v10.16b\n" "mov v23.16b, v11.16b\n" - "b 126f\n" - "115:" // Height 4: no bias - "tbz %x[flags], #0, 125f\n" + "b 123f\n" + "112:" // Height 4: no bias + "tbz %x[flags], #0, 122f\n" "cmp x16, #0x10\n" - "bge 124f\n" - "tbz x16, #3, 119f\n" + "bge 121f\n" + "tbz x16, #3, 116f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" @@ -1409,104 +1385,104 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ld1 { v13.4s }, [x9], #0x10\n" "ld1 { v17.4s }, [x27], #0x10\n" "ld1 { v21.4s }, [x25], #0x10\n" - "tbz x16, #2, 117f\n" + "tbz x16, #2, 114f\n" "ld1 { v10.4s }, [x13], #0x10\n" "ld1 { v14.4s }, [x9], #0x10\n" "ld1 { v18.4s }, [x27], #0x10\n" "ld1 { v22.4s }, [x25], #0x10\n" - "tbz x16, #1, 116f\n" + "tbz x16, #1, 113f\n" "mov x19, #0x38\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "ldr d23, [x25], #0x8\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ld1 { v11.s }[2], [x13]\n" "ld1 { v15.s }[2], [x9]\n" "ld1 { v19.s }[2], [x27]\n" "ld1 { v23.s }[2], [x25]\n" - "b 123f\n" - "116:" // Height 4: Partial accumulate: partial_1_12 + "b 120f\n" + "113:" // Height 4: Partial accumulate: partial_1_12 "mov x19, #0x30\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ldr s11, [x13, #0x0]\n" "ldr s15, [x9, #0x0]\n" "ldr s19, [x27, #0x0]\n" "ldr s23, [x25, #0x0]\n" - "b 123f\n" - "117:" // Height 4: Partial accumulate: partial_2_8 - "tbz x16, #1, 118f\n" + "b 120f\n" + "114:" // Height 4: Partial accumulate: partial_2_8 + "tbz x16, #1, 115f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "ldr d22, [x25], #0x8\n" "mov x19, #0x28\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ld1 { v10.s }[2], [x13]\n" "ld1 { v14.s }[2], [x9]\n" "ld1 { v18.s }[2], [x27]\n" "ld1 { v22.s }[2], [x25]\n" - "b 123f\n" - "118:" // Height 4: Partial accumulate: partial_1_8 + "b 120f\n" + "115:" // Height 4: Partial accumulate: partial_1_8 "mov x19, #0x20\n" - "tbz x16, #0, 123f\n" + "tbz x16, 
#0, 120f\n" "ldr s10, [x13, #0x0]\n" "ldr s14, [x9, #0x0]\n" "ldr s18, [x27, #0x0]\n" "ldr s22, [x25, #0x0]\n" - "b 123f\n" - "119:" // Height 4: Partial accumulate: partial_4_0 - "tbz x16, #2, 121f\n" + "b 120f\n" + "116:" // Height 4: Partial accumulate: partial_4_0 + "tbz x16, #2, 118f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" "ld1 { v20.4s }, [x25], #0x10\n" - "tbz x16, #1, 120f\n" + "tbz x16, #1, 117f\n" "mov x19, #0x18\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "ldr d21, [x25], #0x8\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ld1 { v9.s }[2], [x13]\n" "ld1 { v13.s }[2], [x9]\n" "ld1 { v17.s }[2], [x27]\n" "ld1 { v21.s }[2], [x25]\n" - "b 123f\n" - "120:" // Height 4: Partial accumulate: partial_1_4 + "b 120f\n" + "117:" // Height 4: Partial accumulate: partial_1_4 "mov x19, #0x10\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ldr s9, [x13, #0x0]\n" "ldr s13, [x9, #0x0]\n" "ldr s17, [x27, #0x0]\n" "ldr s21, [x25, #0x0]\n" - "b 123f\n" - "121:" // Height 4: Partial accumulate: partial_2_0 - "tbz x16, #1, 122f\n" + "b 120f\n" + "118:" // Height 4: Partial accumulate: partial_2_0 + "tbz x16, #1, 119f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "ldr d20, [x25], #0x8\n" "mov x19, #0x8\n" - "tbz x16, #0, 123f\n" + "tbz x16, #0, 120f\n" "ld1 { v8.s }[2], [x13]\n" "ld1 { v12.s }[2], [x9]\n" "ld1 { v16.s }[2], [x27]\n" "ld1 { v20.s }[2], [x25]\n" - "b 123f\n" - "122:" // Height 4: Partial accumulate: partial_1_0 + "b 120f\n" + "119:" // Height 4: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr s8, [x13, #0x0]\n" "ldr s12, [x9, #0x0]\n" "ldr s16, [x27, #0x0]\n" "ldr s20, [x25, #0x0]\n" - "123:" // Height 4: Partial accumulate: Done + "120:" // Height 4: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" - "b 126f\n" - "124:" // Height 4: full accumulate + "b 123f\n" + 
"121:" // Height 4: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" @@ -1523,8 +1499,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr q21, [x25, #0x10]\n" "ldr q22, [x25, #0x20]\n" "ldr q23, [x25, #0x30]\n" - "b 126f\n" - "125:" // Height 4: no accumulate + "b 123f\n" + "122:" // Height 4: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" @@ -1541,37 +1517,37 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "movi v21.16b, #0x0\n" "movi v22.16b, #0x0\n" "movi v23.16b, #0x0\n" - "126:" // Height 4: setup done + "123:" // Height 4: setup done "mov x12, #0x0\n" - "127:" // Height 4: String loop + "124:" // Height 4: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" - "tbz %x[flags], #3, 128f\n" + "tbz %x[flags], #3, 125f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" "ldr x28, [x20, #0x8]\n" "ldr x26, [x20, #0x10]\n" "ldr x24, [x20, #0x18]\n" - "cbnz x12, 129f\n" + "cbnz x12, 126f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "add x24, x24, x19, LSL #1\n" - "b 129f\n" - "128:" // Height 4: setup direct input + "b 126f\n" + "125:" // Height 4: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" - "129:" // Height 4: input setup done + "126:" // Height 4: input setup done "cmp x11, #0x8\n" - "blt 132f\n" + "blt 129f\n" "cmp x11, #0x10\n" - "blt 131f\n" - "130:" // Height 4: Multiply loop: Main loop head + "blt 128f\n" + "127:" // Height 4: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" @@ -1667,8 +1643,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" ".inst 0x4f62f8f3 
// bfdot v19.4s, v7.8h, v2.h[3]\n" ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" - "bge 130b\n" - "131:" // Height 4: Multiply loop: Single iteration only + "bge 127b\n" + "128:" // Height 4: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" @@ -1763,11 +1739,11 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f61f8ef // bfdot v15.4s, v7.8h, v1.h[3]\n" ".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n" ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" - "132:" // Height 4: Multiply loop: Main loop skip - "cbz x11, 137f\n" + "129:" // Height 4: Multiply loop: Main loop skip + "cbz x11, 133f\n" "cmp x11, #0x2\n" - "blt 134f\n" - "133:" // Height 4: Multiply loop: Odd block loop + "blt 131f\n" + "130:" // Height 4: Multiply loop: Odd block loop "ldr s0, [x10], #0x4\n" "ldr s1, [x28], #0x4\n" "ldr s2, [x26], #0x4\n" @@ -1795,26 +1771,14 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" - "bge 133b\n" - "cbz x11, 137f\n" - "134:" // Height 4: Multiply loop: Skip odd blocks - "tbz x11, #1, 135f\n" - "ldr s0, [x10], #0x4\n" - "ldr s1, [x28], #0x4\n" - "ldr s2, [x26], #0x4\n" - "ldr s3, [x24], #0x4\n" - "tbz x11, #0, 136f\n" - "ld1 { v0.h }[2], [x10]\n" - "ld1 { v1.h }[2], [x28]\n" - "ld1 { v2.h }[2], [x26]\n" - "ld1 { v3.h }[2], [x24]\n" - "b 136f\n" - "135:" // Height 4: Multiply loop: Ragged operand read: partial_1_0 + "bge 130b\n" + "cbz x11, 133f\n" + "131:" // Height 4: Multiply loop: Skip odd blocks "ldr h0, [x10, #0x0]\n" "ldr h1, [x28, #0x0]\n" "ldr h2, [x26, #0x0]\n" "ldr h3, [x24, #0x0]\n" - "136:" // Height 4: Multiply loop: Ragged operand read: Done + "132:" // Height 4: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -1836,16 +1800,16 @@ void 
a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f41f0ef // bfdot v15.4s, v7.8h, v1.h[0]\n" ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" - "137:" // Height 4: Multiply loop: No odd multiplies + "133:" // Height 4: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" - "bne 127b\n" + "bne 124b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm pstl1keep, [x25, #0x0]\n" - "tbz %x[flags], #1, 138f\n" + "tbz %x[flags], #1, 134f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" @@ -1882,10 +1846,10 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmax v21.4s, v21.4s, v1.4s\n" "fmax v22.4s, v22.4s, v1.4s\n" "fmax v23.4s, v23.4s, v1.4s\n" - "138:" // Height 4: No activation + "134:" // Height 4: No activation "cmp x16, #0x10\n" - "bge 147f\n" - "tbz x16, #3, 142f\n" + "bge 143f\n" + "tbz x16, #3, 138f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" @@ -1894,92 +1858,92 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "st1 { v17.4s }, [x27], #0x10\n" "st1 { v20.4s }, [x25], #0x10\n" "st1 { v21.4s }, [x25], #0x10\n" - "tbz x16, #2, 140f\n" + "tbz x16, #2, 136f\n" "st1 { v10.4s }, [x13], #0x10\n" "st1 { v14.4s }, [x9], #0x10\n" "st1 { v18.4s }, [x27], #0x10\n" "st1 { v22.4s }, [x25], #0x10\n" - "tbz x16, #1, 139f\n" + "tbz x16, #1, 135f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" - "tbz x16, #0, 146f\n" + "tbz x16, #0, 142f\n" "st1 { v11.s }[2], [x13]\n" "st1 { v15.s }[2], [x9]\n" "st1 { v19.s }[2], [x27]\n" "st1 { v23.s }[2], [x25]\n" - "b 146f\n" - "139:" // Height 4: Partial direct writeback: partial_1_12 - "tbz x16, #0, 146f\n" + "b 142f\n" + "135:" // Height 4: Partial direct writeback: partial_1_12 + "tbz x16, #0, 142f\n" "str s11, [x13, 
#0x0]\n" "str s15, [x9, #0x0]\n" "str s19, [x27, #0x0]\n" "str s23, [x25, #0x0]\n" - "b 146f\n" - "140:" // Height 4: Partial direct writeback: partial_2_8 - "tbz x16, #1, 141f\n" + "b 142f\n" + "136:" // Height 4: Partial direct writeback: partial_2_8 + "tbz x16, #1, 137f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" - "tbz x16, #0, 146f\n" + "tbz x16, #0, 142f\n" "st1 { v10.s }[2], [x13]\n" "st1 { v14.s }[2], [x9]\n" "st1 { v18.s }[2], [x27]\n" "st1 { v22.s }[2], [x25]\n" - "b 146f\n" - "141:" // Height 4: Partial direct writeback: partial_1_8 - "tbz x16, #0, 146f\n" + "b 142f\n" + "137:" // Height 4: Partial direct writeback: partial_1_8 + "tbz x16, #0, 142f\n" "str s10, [x13, #0x0]\n" "str s14, [x9, #0x0]\n" "str s18, [x27, #0x0]\n" "str s22, [x25, #0x0]\n" - "b 146f\n" - "142:" // Height 4: Partial direct writeback: partial_4_0 - "tbz x16, #2, 144f\n" + "b 142f\n" + "138:" // Height 4: Partial direct writeback: partial_4_0 + "tbz x16, #2, 140f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v16.4s }, [x27], #0x10\n" "st1 { v20.4s }, [x25], #0x10\n" - "tbz x16, #1, 143f\n" + "tbz x16, #1, 139f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" - "tbz x16, #0, 146f\n" + "tbz x16, #0, 142f\n" "st1 { v9.s }[2], [x13]\n" "st1 { v13.s }[2], [x9]\n" "st1 { v17.s }[2], [x27]\n" "st1 { v21.s }[2], [x25]\n" - "b 146f\n" - "143:" // Height 4: Partial direct writeback: partial_1_4 - "tbz x16, #0, 146f\n" + "b 142f\n" + "139:" // Height 4: Partial direct writeback: partial_1_4 + "tbz x16, #0, 142f\n" "str s9, [x13, #0x0]\n" "str s13, [x9, #0x0]\n" "str s17, [x27, #0x0]\n" "str s21, [x25, #0x0]\n" - "b 146f\n" - "144:" // Height 4: Partial direct writeback: partial_2_0 - "tbz x16, #1, 145f\n" + "b 142f\n" + "140:" // Height 4: Partial direct writeback: partial_2_0 + "tbz x16, #1, 141f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, 
[x27], #0x8\n" "str d20, [x25], #0x8\n" - "tbz x16, #0, 146f\n" + "tbz x16, #0, 142f\n" "st1 { v8.s }[2], [x13]\n" "st1 { v12.s }[2], [x9]\n" "st1 { v16.s }[2], [x27]\n" "st1 { v20.s }[2], [x25]\n" - "b 146f\n" - "145:" // Height 4: Partial direct writeback: partial_1_0 + "b 142f\n" + "141:" // Height 4: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" "str s12, [x9, #0x0]\n" "str s16, [x27, #0x0]\n" "str s20, [x25, #0x0]\n" - "146:" // Height 4: Partial direct writeback: Done - "b 148f\n" - "147:" // Height 4: Full writeback + "142:" // Height 4: Partial direct writeback: Done + "b 144f\n" + "143:" // Height 4: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" @@ -2000,16 +1964,16 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x9, x9, #0x40\n" "add x27, x27, #0x40\n" "add x25, x25, #0x40\n" - "148:" // Height 4: Writeback done + "144:" // Height 4: Writeback done "subs x16, x16, #0x10\n" - "bgt 114b\n" - "b 224f\n" - "149:" // Height 5 + "bgt 111b\n" + "b 218f\n" + "145:" // Height 5 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" - "tbz %x[flags], #2, 150f\n" + "tbz %x[flags], #2, 146f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #2\n" "ldr x9, [%x[output_ptr], #0x8]\n" @@ -2020,15 +1984,15 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x27, x27, x19, LSL #2\n" "add x25, x25, x19, LSL #2\n" "add x23, x23, x19, LSL #2\n" - "b 151f\n" - "150:" // Height 5: setup direct output + "b 147f\n" + "146:" // Height 5: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #2\n" "add x27, x9, x19, LSL #2\n" "add x25, x27, x19, LSL #2\n" "add x23, x25, x19, LSL #2\n" - "151:" // Height 5: Column loop - "cbz x14, 152f\n" + "147:" // Height 5: Column loop + "cbz x14, 148f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" @@ -2050,12 +2014,12 @@ void 
a64_hybrid_bf16fp32_dot_6x16 ( "mov v25.16b, v9.16b\n" "mov v26.16b, v10.16b\n" "mov v27.16b, v11.16b\n" - "b 163f\n" - "152:" // Height 5: no bias - "tbz %x[flags], #0, 162f\n" + "b 159f\n" + "148:" // Height 5: no bias + "tbz %x[flags], #0, 158f\n" "cmp x16, #0x10\n" - "bge 161f\n" - "tbz x16, #3, 156f\n" + "bge 157f\n" + "tbz x16, #3, 152f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" @@ -2066,119 +2030,119 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ld1 { v17.4s }, [x27], #0x10\n" "ld1 { v21.4s }, [x25], #0x10\n" "ld1 { v25.4s }, [x23], #0x10\n" - "tbz x16, #2, 154f\n" + "tbz x16, #2, 150f\n" "ld1 { v10.4s }, [x13], #0x10\n" "ld1 { v14.4s }, [x9], #0x10\n" "ld1 { v18.4s }, [x27], #0x10\n" "ld1 { v22.4s }, [x25], #0x10\n" "ld1 { v26.4s }, [x23], #0x10\n" - "tbz x16, #1, 153f\n" + "tbz x16, #1, 149f\n" "mov x19, #0x38\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" "ldr d19, [x27], #0x8\n" "ldr d23, [x25], #0x8\n" "ldr d27, [x23], #0x8\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ld1 { v11.s }[2], [x13]\n" "ld1 { v15.s }[2], [x9]\n" "ld1 { v19.s }[2], [x27]\n" "ld1 { v23.s }[2], [x25]\n" "ld1 { v27.s }[2], [x23]\n" - "b 160f\n" - "153:" // Height 5: Partial accumulate: partial_1_12 + "b 156f\n" + "149:" // Height 5: Partial accumulate: partial_1_12 "mov x19, #0x30\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ldr s11, [x13, #0x0]\n" "ldr s15, [x9, #0x0]\n" "ldr s19, [x27, #0x0]\n" "ldr s23, [x25, #0x0]\n" "ldr s27, [x23, #0x0]\n" - "b 160f\n" - "154:" // Height 5: Partial accumulate: partial_2_8 - "tbz x16, #1, 155f\n" + "b 156f\n" + "150:" // Height 5: Partial accumulate: partial_2_8 + "tbz x16, #1, 151f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" "ldr d22, [x25], #0x8\n" "ldr d26, [x23], #0x8\n" "mov x19, #0x28\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ld1 { v10.s }[2], [x13]\n" "ld1 { v14.s }[2], [x9]\n" "ld1 { v18.s }[2], [x27]\n" "ld1 { v22.s }[2], 
[x25]\n" "ld1 { v26.s }[2], [x23]\n" - "b 160f\n" - "155:" // Height 5: Partial accumulate: partial_1_8 + "b 156f\n" + "151:" // Height 5: Partial accumulate: partial_1_8 "mov x19, #0x20\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ldr s10, [x13, #0x0]\n" "ldr s14, [x9, #0x0]\n" "ldr s18, [x27, #0x0]\n" "ldr s22, [x25, #0x0]\n" "ldr s26, [x23, #0x0]\n" - "b 160f\n" - "156:" // Height 5: Partial accumulate: partial_4_0 - "tbz x16, #2, 158f\n" + "b 156f\n" + "152:" // Height 5: Partial accumulate: partial_4_0 + "tbz x16, #2, 154f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" "ld1 { v20.4s }, [x25], #0x10\n" "ld1 { v24.4s }, [x23], #0x10\n" - "tbz x16, #1, 157f\n" + "tbz x16, #1, 153f\n" "mov x19, #0x18\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" "ldr d17, [x27], #0x8\n" "ldr d21, [x25], #0x8\n" "ldr d25, [x23], #0x8\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ld1 { v9.s }[2], [x13]\n" "ld1 { v13.s }[2], [x9]\n" "ld1 { v17.s }[2], [x27]\n" "ld1 { v21.s }[2], [x25]\n" "ld1 { v25.s }[2], [x23]\n" - "b 160f\n" - "157:" // Height 5: Partial accumulate: partial_1_4 + "b 156f\n" + "153:" // Height 5: Partial accumulate: partial_1_4 "mov x19, #0x10\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ldr s9, [x13, #0x0]\n" "ldr s13, [x9, #0x0]\n" "ldr s17, [x27, #0x0]\n" "ldr s21, [x25, #0x0]\n" "ldr s25, [x23, #0x0]\n" - "b 160f\n" - "158:" // Height 5: Partial accumulate: partial_2_0 - "tbz x16, #1, 159f\n" + "b 156f\n" + "154:" // Height 5: Partial accumulate: partial_2_0 + "tbz x16, #1, 155f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" "ldr d20, [x25], #0x8\n" "ldr d24, [x23], #0x8\n" "mov x19, #0x8\n" - "tbz x16, #0, 160f\n" + "tbz x16, #0, 156f\n" "ld1 { v8.s }[2], [x13]\n" "ld1 { v12.s }[2], [x9]\n" "ld1 { v16.s }[2], [x27]\n" "ld1 { v20.s }[2], [x25]\n" "ld1 { v24.s }[2], [x23]\n" - "b 160f\n" - "159:" // Height 5: Partial accumulate: partial_1_0 + "b 156f\n" + 
"155:" // Height 5: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr s8, [x13, #0x0]\n" "ldr s12, [x9, #0x0]\n" "ldr s16, [x27, #0x0]\n" "ldr s20, [x25, #0x0]\n" "ldr s24, [x23, #0x0]\n" - "160:" // Height 5: Partial accumulate: Done + "156:" // Height 5: Partial accumulate: Done "sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" "sub x23, x23, x19\n" - "b 163f\n" - "161:" // Height 5: full accumulate + "b 159f\n" + "157:" // Height 5: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" @@ -2199,8 +2163,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr q25, [x23, #0x10]\n" "ldr q26, [x23, #0x20]\n" "ldr q27, [x23, #0x30]\n" - "b 163f\n" - "162:" // Height 5: no accumulate + "b 159f\n" + "158:" // Height 5: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" @@ -2221,13 +2185,13 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "movi v25.16b, #0x0\n" "movi v26.16b, #0x0\n" "movi v27.16b, #0x0\n" - "163:" // Height 5: setup done + "159:" // Height 5: setup done "mov x12, #0x0\n" - "164:" // Height 5: String loop + "160:" // Height 5: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" - "tbz %x[flags], #3, 165f\n" + "tbz %x[flags], #3, 161f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" @@ -2235,26 +2199,26 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr x26, [x20, #0x10]\n" "ldr x24, [x20, #0x18]\n" "ldr x22, [x20, #0x20]\n" - "cbnz x12, 166f\n" + "cbnz x12, 162f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" "add x26, x26, x19, LSL #1\n" "add x24, x24, x19, LSL #1\n" "add x22, x22, x19, LSL #1\n" - "b 166f\n" - "165:" // Height 5: setup direct input + "b 162f\n" + "161:" // Height 5: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, 
x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" "add x22, x24, x19, LSL #1\n" - "166:" // Height 5: input setup done + "162:" // Height 5: input setup done "cmp x11, #0x8\n" - "blt 169f\n" + "blt 165f\n" "cmp x11, #0x10\n" - "blt 168f\n" - "167:" // Height 5: Multiply loop: Main loop head + "blt 164f\n" + "163:" // Height 5: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" @@ -2369,8 +2333,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n" ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" ".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n" - "bge 167b\n" - "168:" // Height 5: Multiply loop: Single iteration only + "bge 163b\n" + "164:" // Height 5: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" @@ -2484,11 +2448,11 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f62f8f3 // bfdot v19.4s, v7.8h, v2.h[3]\n" ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" ".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n" - "169:" // Height 5: Multiply loop: Main loop skip - "cbz x11, 174f\n" + "165:" // Height 5: Multiply loop: Main loop skip + "cbz x11, 169f\n" "cmp x11, #0x2\n" - "blt 171f\n" - "170:" // Height 5: Multiply loop: Odd block loop + "blt 167f\n" + "166:" // Height 5: Multiply loop: Odd block loop "ldr s0, [x10], #0x4\n" "ldr s1, [x28], #0x4\n" "ldr s2, [x26], #0x4\n" @@ -2521,29 +2485,15 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" ".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n" - "bge 170b\n" - "cbz x11, 174f\n" - "171:" // Height 5: Multiply loop: Skip odd blocks - "tbz x11, #1, 172f\n" - "ldr s0, [x10], #0x4\n" - "ldr s1, [x28], #0x4\n" - "ldr s2, [x26], #0x4\n" - "ldr s3, [x24], #0x4\n" - "ldr s4, [x22], #0x4\n" - "tbz x11, #0, 173f\n" - "ld1 { v0.h }[2], [x10]\n" - "ld1 
{ v1.h }[2], [x28]\n" - "ld1 { v2.h }[2], [x26]\n" - "ld1 { v3.h }[2], [x24]\n" - "ld1 { v4.h }[2], [x22]\n" - "b 173f\n" - "172:" // Height 5: Multiply loop: Ragged operand read: partial_1_0 + "bge 166b\n" + "cbz x11, 169f\n" + "167:" // Height 5: Multiply loop: Skip odd blocks "ldr h0, [x10, #0x0]\n" "ldr h1, [x28, #0x0]\n" "ldr h2, [x26, #0x0]\n" "ldr h3, [x24, #0x0]\n" "ldr h4, [x22, #0x0]\n" - "173:" // Height 5: Multiply loop: Ragged operand read: Done + "168:" // Height 5: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -2569,17 +2519,17 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f42f0f3 // bfdot v19.4s, v7.8h, v2.h[0]\n" ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" ".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n" - "174:" // Height 5: Multiply loop: No odd multiplies + "169:" // Height 5: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" - "bne 164b\n" + "bne 160b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm pstl1keep, [x25, #0x0]\n" "prfm pstl1keep, [x23, #0x0]\n" - "tbz %x[flags], #1, 175f\n" + "tbz %x[flags], #1, 170f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" @@ -2624,10 +2574,10 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmax v26.4s, v26.4s, v1.4s\n" "fmin v27.4s, v27.4s, v0.4s\n" "fmax v27.4s, v27.4s, v1.4s\n" - "175:" // Height 5: No activation + "170:" // Height 5: No activation "cmp x16, #0x10\n" - "bge 184f\n" - "tbz x16, #3, 179f\n" + "bge 179f\n" + "tbz x16, #3, 174f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" @@ -2638,106 +2588,106 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "st1 { v21.4s }, [x25], #0x10\n" "st1 { v24.4s }, [x23], #0x10\n" "st1 { v25.4s }, [x23], #0x10\n" - "tbz x16, 
#2, 177f\n" + "tbz x16, #2, 172f\n" "st1 { v10.4s }, [x13], #0x10\n" "st1 { v14.4s }, [x9], #0x10\n" "st1 { v18.4s }, [x27], #0x10\n" "st1 { v22.4s }, [x25], #0x10\n" "st1 { v26.4s }, [x23], #0x10\n" - "tbz x16, #1, 176f\n" + "tbz x16, #1, 171f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" "str d27, [x23], #0x8\n" - "tbz x16, #0, 183f\n" + "tbz x16, #0, 178f\n" "st1 { v11.s }[2], [x13]\n" "st1 { v15.s }[2], [x9]\n" "st1 { v19.s }[2], [x27]\n" "st1 { v23.s }[2], [x25]\n" "st1 { v27.s }[2], [x23]\n" - "b 183f\n" - "176:" // Height 5: Partial direct writeback: partial_1_12 - "tbz x16, #0, 183f\n" + "b 178f\n" + "171:" // Height 5: Partial direct writeback: partial_1_12 + "tbz x16, #0, 178f\n" "str s11, [x13, #0x0]\n" "str s15, [x9, #0x0]\n" "str s19, [x27, #0x0]\n" "str s23, [x25, #0x0]\n" "str s27, [x23, #0x0]\n" - "b 183f\n" - "177:" // Height 5: Partial direct writeback: partial_2_8 - "tbz x16, #1, 178f\n" + "b 178f\n" + "172:" // Height 5: Partial direct writeback: partial_2_8 + "tbz x16, #1, 173f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" "str d26, [x23], #0x8\n" - "tbz x16, #0, 183f\n" + "tbz x16, #0, 178f\n" "st1 { v10.s }[2], [x13]\n" "st1 { v14.s }[2], [x9]\n" "st1 { v18.s }[2], [x27]\n" "st1 { v22.s }[2], [x25]\n" "st1 { v26.s }[2], [x23]\n" - "b 183f\n" - "178:" // Height 5: Partial direct writeback: partial_1_8 - "tbz x16, #0, 183f\n" + "b 178f\n" + "173:" // Height 5: Partial direct writeback: partial_1_8 + "tbz x16, #0, 178f\n" "str s10, [x13, #0x0]\n" "str s14, [x9, #0x0]\n" "str s18, [x27, #0x0]\n" "str s22, [x25, #0x0]\n" "str s26, [x23, #0x0]\n" - "b 183f\n" - "179:" // Height 5: Partial direct writeback: partial_4_0 - "tbz x16, #2, 181f\n" + "b 178f\n" + "174:" // Height 5: Partial direct writeback: partial_4_0 + "tbz x16, #2, 176f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v16.4s }, [x27], #0x10\n" "st1 { 
v20.4s }, [x25], #0x10\n" "st1 { v24.4s }, [x23], #0x10\n" - "tbz x16, #1, 180f\n" + "tbz x16, #1, 175f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" "str d25, [x23], #0x8\n" - "tbz x16, #0, 183f\n" + "tbz x16, #0, 178f\n" "st1 { v9.s }[2], [x13]\n" "st1 { v13.s }[2], [x9]\n" "st1 { v17.s }[2], [x27]\n" "st1 { v21.s }[2], [x25]\n" "st1 { v25.s }[2], [x23]\n" - "b 183f\n" - "180:" // Height 5: Partial direct writeback: partial_1_4 - "tbz x16, #0, 183f\n" + "b 178f\n" + "175:" // Height 5: Partial direct writeback: partial_1_4 + "tbz x16, #0, 178f\n" "str s9, [x13, #0x0]\n" "str s13, [x9, #0x0]\n" "str s17, [x27, #0x0]\n" "str s21, [x25, #0x0]\n" "str s25, [x23, #0x0]\n" - "b 183f\n" - "181:" // Height 5: Partial direct writeback: partial_2_0 - "tbz x16, #1, 182f\n" + "b 178f\n" + "176:" // Height 5: Partial direct writeback: partial_2_0 + "tbz x16, #1, 177f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "str d20, [x25], #0x8\n" "str d24, [x23], #0x8\n" - "tbz x16, #0, 183f\n" + "tbz x16, #0, 178f\n" "st1 { v8.s }[2], [x13]\n" "st1 { v12.s }[2], [x9]\n" "st1 { v16.s }[2], [x27]\n" "st1 { v20.s }[2], [x25]\n" "st1 { v24.s }[2], [x23]\n" - "b 183f\n" - "182:" // Height 5: Partial direct writeback: partial_1_0 + "b 178f\n" + "177:" // Height 5: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" "str s12, [x9, #0x0]\n" "str s16, [x27, #0x0]\n" "str s20, [x25, #0x0]\n" "str s24, [x23, #0x0]\n" - "183:" // Height 5: Partial direct writeback: Done - "b 185f\n" - "184:" // Height 5: Full writeback + "178:" // Height 5: Partial direct writeback: Done + "b 180f\n" + "179:" // Height 5: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" @@ -2763,16 +2713,16 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x27, x27, #0x40\n" "add x25, x25, #0x40\n" "add x23, x23, #0x40\n" - "185:" // Height 5: Writeback done + "180:" // Height 5: Writeback done "subs 
x16, x16, #0x10\n" - "bgt 151b\n" - "b 224f\n" - "186:" // Height 6 + "bgt 147b\n" + "b 218f\n" + "181:" // Height 6 "ldr x16, [%x[args_ptr], %[offsetof_N]]\n" "mov x14, %x[bias]\n" "ldr x15, [%x[args_ptr], %[offsetof_B_ptr]]\n" "ldr x19, [%x[args_ptr], %[offsetof_output_offset]]\n" - "tbz %x[flags], #2, 187f\n" + "tbz %x[flags], #2, 182f\n" "ldr x13, [%x[output_ptr], #0x0]\n" "add x13, x13, x19, LSL #2\n" "ldr x9, [%x[output_ptr], #0x8]\n" @@ -2786,8 +2736,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x25, x25, x19, LSL #2\n" "add x23, x23, x19, LSL #2\n" "add x21, x21, x19, LSL #2\n" - "b 188f\n" - "187:" // Height 6: setup direct output + "b 183f\n" + "182:" // Height 6: setup direct output "mov x13, %x[output_ptr]\n" "add x9, x13, x19, LSL #2\n" "add x27, x9, x19, LSL #2\n" @@ -2795,8 +2745,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x23, x25, x19, LSL #2\n" "add x21, x23, x19, LSL #2\n" "add %x[output_ptr], x21, x19, LSL #2\n" - "188:" // Height 6: Column loop - "cbz x14, 189f\n" + "183:" // Height 6: Column loop + "cbz x14, 184f\n" "ldr q8, [x14, #0x0]\n" "mov v12.16b, v8.16b\n" "ldr q9, [x14, #0x10]\n" @@ -2822,12 +2772,12 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "mov v29.16b, v9.16b\n" "mov v30.16b, v10.16b\n" "mov v31.16b, v11.16b\n" - "b 200f\n" - "189:" // Height 6: no bias - "tbz %x[flags], #0, 199f\n" + "b 195f\n" + "184:" // Height 6: no bias + "tbz %x[flags], #0, 194f\n" "cmp x16, #0x10\n" - "bge 198f\n" - "tbz x16, #3, 193f\n" + "bge 193f\n" + "tbz x16, #3, 188f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" @@ -2840,14 +2790,14 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ld1 { v21.4s }, [x25], #0x10\n" "ld1 { v25.4s }, [x23], #0x10\n" "ld1 { v29.4s }, [x21], #0x10\n" - "tbz x16, #2, 191f\n" + "tbz x16, #2, 186f\n" "ld1 { v10.4s }, [x13], #0x10\n" "ld1 { v14.4s }, [x9], #0x10\n" "ld1 { v18.4s }, [x27], #0x10\n" "ld1 { v22.4s }, [x25], #0x10\n" "ld1 { v26.4s }, [x23], #0x10\n" "ld1 { v30.4s }, [x21], 
#0x10\n" - "tbz x16, #1, 190f\n" + "tbz x16, #1, 185f\n" "mov x19, #0x38\n" "ldr d11, [x13], #0x8\n" "ldr d15, [x9], #0x8\n" @@ -2855,26 +2805,26 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr d23, [x25], #0x8\n" "ldr d27, [x23], #0x8\n" "ldr d31, [x21], #0x8\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ld1 { v11.s }[2], [x13]\n" "ld1 { v15.s }[2], [x9]\n" "ld1 { v19.s }[2], [x27]\n" "ld1 { v23.s }[2], [x25]\n" "ld1 { v27.s }[2], [x23]\n" "ld1 { v31.s }[2], [x21]\n" - "b 197f\n" - "190:" // Height 6: Partial accumulate: partial_1_12 + "b 192f\n" + "185:" // Height 6: Partial accumulate: partial_1_12 "mov x19, #0x30\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ldr s11, [x13, #0x0]\n" "ldr s15, [x9, #0x0]\n" "ldr s19, [x27, #0x0]\n" "ldr s23, [x25, #0x0]\n" "ldr s27, [x23, #0x0]\n" "ldr s31, [x21, #0x0]\n" - "b 197f\n" - "191:" // Height 6: Partial accumulate: partial_2_8 - "tbz x16, #1, 192f\n" + "b 192f\n" + "186:" // Height 6: Partial accumulate: partial_2_8 + "tbz x16, #1, 187f\n" "ldr d10, [x13], #0x8\n" "ldr d14, [x9], #0x8\n" "ldr d18, [x27], #0x8\n" @@ -2882,33 +2832,33 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr d26, [x23], #0x8\n" "ldr d30, [x21], #0x8\n" "mov x19, #0x28\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ld1 { v10.s }[2], [x13]\n" "ld1 { v14.s }[2], [x9]\n" "ld1 { v18.s }[2], [x27]\n" "ld1 { v22.s }[2], [x25]\n" "ld1 { v26.s }[2], [x23]\n" "ld1 { v30.s }[2], [x21]\n" - "b 197f\n" - "192:" // Height 6: Partial accumulate: partial_1_8 + "b 192f\n" + "187:" // Height 6: Partial accumulate: partial_1_8 "mov x19, #0x20\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ldr s10, [x13, #0x0]\n" "ldr s14, [x9, #0x0]\n" "ldr s18, [x27, #0x0]\n" "ldr s22, [x25, #0x0]\n" "ldr s26, [x23, #0x0]\n" "ldr s30, [x21, #0x0]\n" - "b 197f\n" - "193:" // Height 6: Partial accumulate: partial_4_0 - "tbz x16, #2, 195f\n" + "b 192f\n" + "188:" // Height 6: Partial accumulate: partial_4_0 + "tbz x16, #2, 190f\n" "ld1 { v8.4s }, [x13], #0x10\n" "ld1 { 
v12.4s }, [x9], #0x10\n" "ld1 { v16.4s }, [x27], #0x10\n" "ld1 { v20.4s }, [x25], #0x10\n" "ld1 { v24.4s }, [x23], #0x10\n" "ld1 { v28.4s }, [x21], #0x10\n" - "tbz x16, #1, 194f\n" + "tbz x16, #1, 189f\n" "mov x19, #0x18\n" "ldr d9, [x13], #0x8\n" "ldr d13, [x9], #0x8\n" @@ -2916,26 +2866,26 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr d21, [x25], #0x8\n" "ldr d25, [x23], #0x8\n" "ldr d29, [x21], #0x8\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ld1 { v9.s }[2], [x13]\n" "ld1 { v13.s }[2], [x9]\n" "ld1 { v17.s }[2], [x27]\n" "ld1 { v21.s }[2], [x25]\n" "ld1 { v25.s }[2], [x23]\n" "ld1 { v29.s }[2], [x21]\n" - "b 197f\n" - "194:" // Height 6: Partial accumulate: partial_1_4 + "b 192f\n" + "189:" // Height 6: Partial accumulate: partial_1_4 "mov x19, #0x10\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ldr s9, [x13, #0x0]\n" "ldr s13, [x9, #0x0]\n" "ldr s17, [x27, #0x0]\n" "ldr s21, [x25, #0x0]\n" "ldr s25, [x23, #0x0]\n" "ldr s29, [x21, #0x0]\n" - "b 197f\n" - "195:" // Height 6: Partial accumulate: partial_2_0 - "tbz x16, #1, 196f\n" + "b 192f\n" + "190:" // Height 6: Partial accumulate: partial_2_0 + "tbz x16, #1, 191f\n" "ldr d8, [x13], #0x8\n" "ldr d12, [x9], #0x8\n" "ldr d16, [x27], #0x8\n" @@ -2943,15 +2893,15 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr d24, [x23], #0x8\n" "ldr d28, [x21], #0x8\n" "mov x19, #0x8\n" - "tbz x16, #0, 197f\n" + "tbz x16, #0, 192f\n" "ld1 { v8.s }[2], [x13]\n" "ld1 { v12.s }[2], [x9]\n" "ld1 { v16.s }[2], [x27]\n" "ld1 { v20.s }[2], [x25]\n" "ld1 { v24.s }[2], [x23]\n" "ld1 { v28.s }[2], [x21]\n" - "b 197f\n" - "196:" // Height 6: Partial accumulate: partial_1_0 + "b 192f\n" + "191:" // Height 6: Partial accumulate: partial_1_0 "mov x19, #0x0\n" "ldr s8, [x13, #0x0]\n" "ldr s12, [x9, #0x0]\n" @@ -2959,15 +2909,15 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr s20, [x25, #0x0]\n" "ldr s24, [x23, #0x0]\n" "ldr s28, [x21, #0x0]\n" - "197:" // Height 6: Partial accumulate: Done + "192:" // Height 6: Partial accumulate: Done 
"sub x13, x13, x19\n" "sub x9, x9, x19\n" "sub x27, x27, x19\n" "sub x25, x25, x19\n" "sub x23, x23, x19\n" "sub x21, x21, x19\n" - "b 200f\n" - "198:" // Height 6: full accumulate + "b 195f\n" + "193:" // Height 6: full accumulate "ldr q8, [x13, #0x0]\n" "ldr q9, [x13, #0x10]\n" "ldr q10, [x13, #0x20]\n" @@ -2992,8 +2942,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr q29, [x21, #0x10]\n" "ldr q30, [x21, #0x20]\n" "ldr q31, [x21, #0x30]\n" - "b 200f\n" - "199:" // Height 6: no accumulate + "b 195f\n" + "194:" // Height 6: no accumulate "movi v8.16b, #0x0\n" "movi v9.16b, #0x0\n" "movi v10.16b, #0x0\n" @@ -3018,13 +2968,13 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "movi v29.16b, #0x0\n" "movi v30.16b, #0x0\n" "movi v31.16b, #0x0\n" - "200:" // Height 6: setup done + "195:" // Height 6: setup done "mov x12, #0x0\n" - "201:" // Height 6: String loop + "196:" // Height 6: String loop "ldr x20, [%x[args_ptr], %[offsetof_string_lengths]]\n" "ldr x19, [%x[args_ptr], %[offsetof_input_offset]]\n" "ldr w11, [x20, x12, LSL #0x2]\n" - "tbz %x[flags], #3, 202f\n" + "tbz %x[flags], #3, 197f\n" "ldr x20, [%x[input_ptr], x12, LSL #0x3]\n" "add x20, x20, x19, LSL #3\n" "ldr x10, [x20, #0x0]\n" @@ -3033,7 +2983,7 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "ldr x24, [x20, #0x18]\n" "ldr x22, [x20, #0x20]\n" "ldr x20, [x20, #0x28]\n" - "cbnz x12, 203f\n" + "cbnz x12, 198f\n" "ldr x19, [%x[args_ptr], %[offsetof_input_initial_col]]\n" "add x10, x10, x19, LSL #1\n" "add x28, x28, x19, LSL #1\n" @@ -3041,20 +2991,20 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x24, x24, x19, LSL #1\n" "add x22, x22, x19, LSL #1\n" "add x20, x20, x19, LSL #1\n" - "b 203f\n" - "202:" // Height 6: setup direct input + "b 198f\n" + "197:" // Height 6: setup direct input "mov x10, %x[input_ptr]\n" "add x28, x10, x19, LSL #1\n" "add x26, x28, x19, LSL #1\n" "add x24, x26, x19, LSL #1\n" "add x22, x24, x19, LSL #1\n" "add x20, x22, x19, LSL #1\n" - "203:" // Height 6: input setup done + "198:" // Height 6: input setup 
done "cmp x11, #0x8\n" - "blt 206f\n" + "blt 201f\n" "cmp x11, #0x10\n" - "blt 205f\n" - "204:" // Height 6: Multiply loop: Main loop head + "blt 200f\n" + "199:" // Height 6: Multiply loop: Main loop head "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" "ldr q2, [x26, #0x0]\n" @@ -3188,8 +3138,8 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" ".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n" ".inst 0x4f65f8ff // bfdot v31.4s, v7.8h, v5.h[3]\n" - "bge 204b\n" - "205:" // Height 6: Multiply loop: Single iteration only + "bge 199b\n" + "200:" // Height 6: Multiply loop: Single iteration only "sub x11, x11, #0x8\n" "ldr q0, [x10, #0x0]\n" "ldr q1, [x28, #0x0]\n" @@ -3322,11 +3272,11 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f63f8f7 // bfdot v23.4s, v7.8h, v3.h[3]\n" ".inst 0x4f64f8fb // bfdot v27.4s, v7.8h, v4.h[3]\n" ".inst 0x4f65f8ff // bfdot v31.4s, v7.8h, v5.h[3]\n" - "206:" // Height 6: Multiply loop: Main loop skip - "cbz x11, 211f\n" + "201:" // Height 6: Multiply loop: Main loop skip + "cbz x11, 205f\n" "cmp x11, #0x2\n" - "blt 208f\n" - "207:" // Height 6: Multiply loop: Odd block loop + "blt 203f\n" + "202:" // Height 6: Multiply loop: Odd block loop "ldr s0, [x10], #0x4\n" "ldr s1, [x28], #0x4\n" "ldr s2, [x26], #0x4\n" @@ -3364,32 +3314,16 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" ".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n" ".inst 0x4f45f0ff // bfdot v31.4s, v7.8h, v5.h[0]\n" - "bge 207b\n" - "cbz x11, 211f\n" - "208:" // Height 6: Multiply loop: Skip odd blocks - "tbz x11, #1, 209f\n" - "ldr s0, [x10], #0x4\n" - "ldr s1, [x28], #0x4\n" - "ldr s2, [x26], #0x4\n" - "ldr s3, [x24], #0x4\n" - "ldr s4, [x22], #0x4\n" - "ldr s5, [x20], #0x4\n" - "tbz x11, #0, 210f\n" - "ld1 { v0.h }[2], [x10]\n" - "ld1 { v1.h }[2], [x28]\n" - "ld1 { v2.h }[2], [x26]\n" - "ld1 { v3.h }[2], [x24]\n" - "ld1 { v4.h }[2], [x22]\n" - "ld1 { v5.h }[2], [x20]\n" - "b 210f\n" 
- "209:" // Height 6: Multiply loop: Ragged operand read: partial_1_0 + "bge 202b\n" + "cbz x11, 205f\n" + "203:" // Height 6: Multiply loop: Skip odd blocks "ldr h0, [x10, #0x0]\n" "ldr h1, [x28, #0x0]\n" "ldr h2, [x26, #0x0]\n" "ldr h3, [x24, #0x0]\n" "ldr h4, [x22, #0x0]\n" "ldr h5, [x20, #0x0]\n" - "210:" // Height 6: Multiply loop: Ragged operand read: Done + "204:" // Height 6: Multiply loop: Ragged operand read: Done "ldr q6, [x15, #0x0]\n" ".inst 0x4f40f0c8 // bfdot v8.4s, v6.8h, v0.h[0]\n" "ldr q7, [x15, #0x10]\n" @@ -3419,18 +3353,18 @@ void a64_hybrid_bf16fp32_dot_6x16 ( ".inst 0x4f43f0f7 // bfdot v23.4s, v7.8h, v3.h[0]\n" ".inst 0x4f44f0fb // bfdot v27.4s, v7.8h, v4.h[0]\n" ".inst 0x4f45f0ff // bfdot v31.4s, v7.8h, v5.h[0]\n" - "211:" // Height 6: Multiply loop: No odd multiplies + "205:" // Height 6: Multiply loop: No odd multiplies "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "add x12, x12, #0x1\n" "cmp x12, x19\n" - "bne 201b\n" + "bne 196b\n" "prfm pstl1keep, [x13, #0x0]\n" "prfm pstl1keep, [x9, #0x0]\n" "prfm pstl1keep, [x27, #0x0]\n" "prfm pstl1keep, [x25, #0x0]\n" "prfm pstl1keep, [x23, #0x0]\n" "prfm pstl1keep, [x21, #0x0]\n" - "tbz %x[flags], #1, 212f\n" + "tbz %x[flags], #1, 206f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1r { v1.4s }, [x19]\n" "add x19, %x[args_ptr], %[offset_max]\n" @@ -3483,10 +3417,10 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "fmin v31.4s, v31.4s, v0.4s\n" "fmax v30.4s, v30.4s, v1.4s\n" "fmax v31.4s, v31.4s, v1.4s\n" - "212:" // Height 6: No activation + "206:" // Height 6: No activation "cmp x16, #0x10\n" - "bge 221f\n" - "tbz x16, #3, 216f\n" + "bge 215f\n" + "tbz x16, #3, 210f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v9.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" @@ -3499,120 +3433,120 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "st1 { v25.4s }, [x23], #0x10\n" "st1 { v28.4s }, [x21], #0x10\n" "st1 { v29.4s }, [x21], #0x10\n" - "tbz x16, #2, 214f\n" + "tbz x16, #2, 208f\n" "st1 { v10.4s }, [x13], #0x10\n" 
"st1 { v14.4s }, [x9], #0x10\n" "st1 { v18.4s }, [x27], #0x10\n" "st1 { v22.4s }, [x25], #0x10\n" "st1 { v26.4s }, [x23], #0x10\n" "st1 { v30.4s }, [x21], #0x10\n" - "tbz x16, #1, 213f\n" + "tbz x16, #1, 207f\n" "str d11, [x13], #0x8\n" "str d15, [x9], #0x8\n" "str d19, [x27], #0x8\n" "str d23, [x25], #0x8\n" "str d27, [x23], #0x8\n" "str d31, [x21], #0x8\n" - "tbz x16, #0, 220f\n" + "tbz x16, #0, 214f\n" "st1 { v11.s }[2], [x13]\n" "st1 { v15.s }[2], [x9]\n" "st1 { v19.s }[2], [x27]\n" "st1 { v23.s }[2], [x25]\n" "st1 { v27.s }[2], [x23]\n" "st1 { v31.s }[2], [x21]\n" - "b 220f\n" - "213:" // Height 6: Partial direct writeback: partial_1_12 - "tbz x16, #0, 220f\n" + "b 214f\n" + "207:" // Height 6: Partial direct writeback: partial_1_12 + "tbz x16, #0, 214f\n" "str s11, [x13, #0x0]\n" "str s15, [x9, #0x0]\n" "str s19, [x27, #0x0]\n" "str s23, [x25, #0x0]\n" "str s27, [x23, #0x0]\n" "str s31, [x21, #0x0]\n" - "b 220f\n" - "214:" // Height 6: Partial direct writeback: partial_2_8 - "tbz x16, #1, 215f\n" + "b 214f\n" + "208:" // Height 6: Partial direct writeback: partial_2_8 + "tbz x16, #1, 209f\n" "str d10, [x13], #0x8\n" "str d14, [x9], #0x8\n" "str d18, [x27], #0x8\n" "str d22, [x25], #0x8\n" "str d26, [x23], #0x8\n" "str d30, [x21], #0x8\n" - "tbz x16, #0, 220f\n" + "tbz x16, #0, 214f\n" "st1 { v10.s }[2], [x13]\n" "st1 { v14.s }[2], [x9]\n" "st1 { v18.s }[2], [x27]\n" "st1 { v22.s }[2], [x25]\n" "st1 { v26.s }[2], [x23]\n" "st1 { v30.s }[2], [x21]\n" - "b 220f\n" - "215:" // Height 6: Partial direct writeback: partial_1_8 - "tbz x16, #0, 220f\n" + "b 214f\n" + "209:" // Height 6: Partial direct writeback: partial_1_8 + "tbz x16, #0, 214f\n" "str s10, [x13, #0x0]\n" "str s14, [x9, #0x0]\n" "str s18, [x27, #0x0]\n" "str s22, [x25, #0x0]\n" "str s26, [x23, #0x0]\n" "str s30, [x21, #0x0]\n" - "b 220f\n" - "216:" // Height 6: Partial direct writeback: partial_4_0 - "tbz x16, #2, 218f\n" + "b 214f\n" + "210:" // Height 6: Partial direct writeback: partial_4_0 + "tbz 
x16, #2, 212f\n" "st1 { v8.4s }, [x13], #0x10\n" "st1 { v12.4s }, [x9], #0x10\n" "st1 { v16.4s }, [x27], #0x10\n" "st1 { v20.4s }, [x25], #0x10\n" "st1 { v24.4s }, [x23], #0x10\n" "st1 { v28.4s }, [x21], #0x10\n" - "tbz x16, #1, 217f\n" + "tbz x16, #1, 211f\n" "str d9, [x13], #0x8\n" "str d13, [x9], #0x8\n" "str d17, [x27], #0x8\n" "str d21, [x25], #0x8\n" "str d25, [x23], #0x8\n" "str d29, [x21], #0x8\n" - "tbz x16, #0, 220f\n" + "tbz x16, #0, 214f\n" "st1 { v9.s }[2], [x13]\n" "st1 { v13.s }[2], [x9]\n" "st1 { v17.s }[2], [x27]\n" "st1 { v21.s }[2], [x25]\n" "st1 { v25.s }[2], [x23]\n" "st1 { v29.s }[2], [x21]\n" - "b 220f\n" - "217:" // Height 6: Partial direct writeback: partial_1_4 - "tbz x16, #0, 220f\n" + "b 214f\n" + "211:" // Height 6: Partial direct writeback: partial_1_4 + "tbz x16, #0, 214f\n" "str s9, [x13, #0x0]\n" "str s13, [x9, #0x0]\n" "str s17, [x27, #0x0]\n" "str s21, [x25, #0x0]\n" "str s25, [x23, #0x0]\n" "str s29, [x21, #0x0]\n" - "b 220f\n" - "218:" // Height 6: Partial direct writeback: partial_2_0 - "tbz x16, #1, 219f\n" + "b 214f\n" + "212:" // Height 6: Partial direct writeback: partial_2_0 + "tbz x16, #1, 213f\n" "str d8, [x13], #0x8\n" "str d12, [x9], #0x8\n" "str d16, [x27], #0x8\n" "str d20, [x25], #0x8\n" "str d24, [x23], #0x8\n" "str d28, [x21], #0x8\n" - "tbz x16, #0, 220f\n" + "tbz x16, #0, 214f\n" "st1 { v8.s }[2], [x13]\n" "st1 { v12.s }[2], [x9]\n" "st1 { v16.s }[2], [x27]\n" "st1 { v20.s }[2], [x25]\n" "st1 { v24.s }[2], [x23]\n" "st1 { v28.s }[2], [x21]\n" - "b 220f\n" - "219:" // Height 6: Partial direct writeback: partial_1_0 + "b 214f\n" + "213:" // Height 6: Partial direct writeback: partial_1_0 "str s8, [x13, #0x0]\n" "str s12, [x9, #0x0]\n" "str s16, [x27, #0x0]\n" "str s20, [x25, #0x0]\n" "str s24, [x23, #0x0]\n" "str s28, [x21, #0x0]\n" - "220:" // Height 6: Partial direct writeback: Done - "b 222f\n" - "221:" // Height 6: Full writeback + "214:" // Height 6: Partial direct writeback: Done + "b 216f\n" + "215:" // 
Height 6: Full writeback "str q8, [x13, #0x0]\n" "str q9, [x13, #0x10]\n" "str q10, [x13, #0x20]\n" @@ -3643,23 +3577,23 @@ void a64_hybrid_bf16fp32_dot_6x16 ( "add x25, x25, #0x40\n" "add x23, x23, #0x40\n" "add x21, x21, #0x40\n" - "222:" // Height 6: Writeback done + "216:" // Height 6: Writeback done "subs x16, x16, #0x10\n" - "bgt 188b\n" + "bgt 183b\n" "subs %x[M], %x[M], #0x6\n" - "beq 224f\n" + "beq 218f\n" "ldr x20, [%x[args_ptr], %[offsetof_input_offset]]\n" - "tbz %x[flags], #3, 223f\n" + "tbz %x[flags], #3, 217f\n" "add x20, x20, #0x6\n" "str x20, [%x[args_ptr], %[offsetof_input_offset]]\n" "b 1b\n" - "223:" // Update direct input + "217:" // Update direct input "mov x19, #0xc\n" "madd %x[input_ptr], x19, x20, %x[input_ptr]\n" "b 1b\n" - "224:" // Exit + "218:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp 
b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp index 876b63c811..ca2696bebd 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #if defined(__aarch64__) && (defined(FP16_KERNELS) || defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)) #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg<__fp16>, \ - size_t, size_t, \ - const __fp16 *, \ - IndirectOutputArg<__fp16>, \ - const __fp16 *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg<__fp16>, \ + size_t, size_t, \ + const __fp16 *, \ + IndirectOutputArg<__fp16>, \ + const __fp16 *, Activation, bool namespace arm_gemm { @@ -71,12 +72,10 @@ public: StdTransformsFixed transforms = {}; - static PerformanceParameters get_performance_parameters(const CPUInfo *ci) - { + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { switch (ci->get_cpu_model()) { case CPUModel::A55r1: return { 5.22 }; - default: return { 14.53 }; } diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp index ff6cbec200..32e341566f 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp16_mla_6x32/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -5390,7 +5390,7 @@ void a64_hybrid_fp16_mla_6x32 ( "b 1b\n" "302:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp index 37d0b8f62d..b4c1ba988f 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -24,16 +24,16 @@ #pragma once #ifdef __aarch64__ -#include "../performance_parameters.hpp" #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const float *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const float *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { @@ -70,24 +70,21 @@ public: return true; } + StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { switch (ci->get_cpu_model()) { case CPUModel::A55r1: return { 2.287 }; - case CPUModel::A53: return { 1.43 }; - case CPUModel::A73: return { 2.56 }; - default: return { 6.667 }; } } - StdTransformsFixed transforms = {}; - // Default to the generic kernel kern_type kernel=a64_hybrid_fp32_mla_6x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp index f490aa4852..a645954edd 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_6x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -3421,7 +3421,7 @@ void a64_hybrid_fp32_mla_6x16 ( "b 1b\n" "206:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp index 043d0643f0..2b5cdae652 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_fixed.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const float *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const float *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp index 01118c3c0c..7f3fc898f5 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_fp32_mla_8x4/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -2186,7 +2186,7 @@ void a64_hybrid_fp32_mla_8x4 ( "b 1b\n" "178:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x6", "x7", "x8", 
"x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp index 4bb7a1e0eb..5f14072f0a 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { @@ -71,6 +72,15 @@ public: StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 7.5301 }; + default: + return { 27.5482 }; + } + } + // Default to the generic kernel kern_type kernel=a64_hybrid_s8qa_dot_4x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp index 3fb365bc1e..17575bd611 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qa_dot_4x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -2062,7 +2062,7 @@ void a64_hybrid_s8qa_dot_4x16 ( "b 1b\n" "126:" // Exit - : [M] "+r" (M), [flags] "+r" (flags), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp index 6d4f3b2efe..4ddc743f01 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { @@ -71,6 +72,15 @@ public: StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 7.5301 }; + default: + return { 27.5482 }; + } + } + // Default to the generic kernel kern_type kernel=a64_hybrid_s8qs_dot_6x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp index 0e98ab8347..9847e6553b 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8qs_dot_6x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -3603,7 +3603,7 @@ void a64_hybrid_s8qs_dot_6x16 ( "b 1b\n" "170:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp index 16a6f9213a..6b3f84064c 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. 
+ * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const int32_t *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const int32_t *, Activation, bool namespace arm_gemm { @@ -71,6 +72,15 @@ public: StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 9.5238, 2.0799, 0.2279 }; + default: + return { 29.6736, 11.4025, 0.5591 }; + } + } + // Default to the generic kernel kern_type kernel=a64_hybrid_s8s32_dot_6x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp index 3257986410..0423a9de11 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_s8s32_dot_6x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -3325,7 +3325,7 @@ void a64_hybrid_s8s32_dot_6x16 ( "b 1b\n" "212:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp index 5b4a7f3e86..acf46205a3 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const uint8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const uint8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { @@ -71,6 +72,15 @@ public: StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 7.5301 }; + default: + return { 27.5482 }; + } + } + // Default to the generic kernel kern_type kernel=a64_hybrid_u8qa_dot_4x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp index ff12472063..2b80285f57 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8qa_dot_4x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -2062,7 +2062,7 @@ void a64_hybrid_u8qa_dot_4x16 ( "b 1b\n" "126:" // Exit - : [M] "+r" (M), [flags] "+r" (flags), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp index 238c1825f3..58fbdcf2a8 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -25,14 +25,15 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const uint8_t *, \ - IndirectOutputArg, \ - const uint32_t *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const uint8_t *, \ + IndirectOutputArg, \ + const uint32_t *, Activation, bool namespace arm_gemm { @@ -71,6 +72,15 @@ public: StdTransformsFixed transforms = {}; + static PerformanceParameters get_performance_parameters(const CPUInfo *ci) { + switch (ci->get_cpu_model()) { + case CPUModel::A55r1: + return { 9.5238, 2.0799, 0.2279 }; + default: + return { 29.6736, 11.4025, 0.5591 }; + } + } + // Default to the generic kernel kern_type kernel=a64_hybrid_u8u32_dot_6x16; diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp index 3c8654147a..89aac84cc5 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_hybrid_u8u32_dot_6x16/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -3325,7 +3325,7 @@ void a64_hybrid_u8u32_dot_6x16 ( "b 1b\n" "212:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp index a3daf079d9..f327e84861 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/a64_sgemm_8x12.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2018 Arm Limited. + * Copyright (c) 2017-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -26,6 +26,7 @@ #ifdef __aarch64__ #include "../std_transforms_fixed.hpp" +#include "../performance_parameters.hpp" namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp index e344d82dc6..63fca129ba 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -28,12 +28,12 @@ #include "../bfloat.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const bfloat16 *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const bfloat16 *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp index 19385e56ea..f5445e72e9 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_bf16fp32_dot_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -252,7 +252,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 7b\n" - "prfm pstl1keep, [x13, #0x0]\n" "tbz %x[flags], #1, 13f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -273,9 +272,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z11.s }, p1, [x13, #3, MUL VL]\n" "addvl x13, x13, #4\n" "14:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 3b\n" "b 86f\n" "15:" // Height 2 @@ -485,8 +483,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 21b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbz %x[flags], #1, 27f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -520,9 +516,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z15.s }, p1, [x9, #3, MUL VL]\n" "addvl x9, x9, #4\n" "28:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 17b\n" "b 86f\n" "29:" // Height 3 @@ -788,9 +783,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 35b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "tbz %x[flags], #1, 41f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -837,9 +829,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z19.s }, p1, [x27, #3, MUL VL]\n" "addvl x27, x27, #4\n" "42:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 31b\n" "b 86f\n" "43:" // Height 4 @@ -1161,10 +1152,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], 
%[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 49b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbz %x[flags], #1, 55f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -1224,9 +1211,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z23.s }, p1, [x25, #3, MUL VL]\n" "addvl x25, x25, #4\n" "56:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 45b\n" "b 86f\n" "57:" // Height 5 @@ -1604,11 +1590,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 63b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbz %x[flags], #1, 69f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -1681,9 +1662,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z27.s }, p1, [x23, #3, MUL VL]\n" "addvl x23, x23, #4\n" "70:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 59b\n" "b 86f\n" "71:" // Height 6 @@ -2119,12 +2099,6 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 77b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbz %x[flags], #1, 83f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -2210,9 +2184,8 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "st1w { z31.s }, p1, [x21, #3, MUL VL]\n" "addvl x21, x21, #4\n" "84:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs 
x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 73b\n" "subs %x[M], %x[M], #0x6\n" "beq 86f\n" @@ -2227,7 +2200,7 @@ void sve_hybrid_bf16fp32_dot_6x4VL ( "b 1b\n" "86:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp index 0260050f29..aa74ce9a73 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg<__fp16>, \ - size_t, size_t, \ - const __fp16 *, \ - IndirectOutputArg<__fp16>, \ - const __fp16 *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg<__fp16>, \ + size_t, size_t, \ + const __fp16 *, \ + IndirectOutputArg<__fp16>, \ + const __fp16 *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp index b19842b122..bb42dc0e04 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp16_mla_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -328,7 +328,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 7b\n" - "prfm pstl1keep, [x13, #0x0]\n" "tbz %x[flags], #1, 13f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -349,9 +348,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z11.h }, p1, [x13, #3, MUL VL]\n" "addvl x13, x13, #4\n" "14:" // Height 1: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 3b\n" "b 86f\n" "15:" // Height 2 @@ -670,8 +668,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 21b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbz %x[flags], #1, 27f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -705,9 +701,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z15.h }, p1, [x9, #3, MUL VL]\n" "addvl x9, x9, #4\n" "28:" // 
Height 2: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 17b\n" "b 86f\n" "29:" // Height 3 @@ -1114,9 +1109,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 35b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "tbz %x[flags], #1, 41f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -1163,9 +1155,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z19.h }, p1, [x27, #3, MUL VL]\n" "addvl x27, x27, #4\n" "42:" // Height 3: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 31b\n" "b 86f\n" "43:" // Height 4 @@ -1660,10 +1651,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 49b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbz %x[flags], #1, 55f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -1723,9 +1710,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z23.h }, p1, [x25, #3, MUL VL]\n" "addvl x25, x25, #4\n" "56:" // Height 4: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 45b\n" "b 86f\n" "57:" // Height 5 @@ -2308,11 +2294,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 63b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbz %x[flags], #1, 69f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -2385,9 +2366,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z27.h 
}, p1, [x23, #3, MUL VL]\n" "addvl x23, x23, #4\n" "70:" // Height 5: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 59b\n" "b 86f\n" "71:" // Height 6 @@ -3060,12 +3040,6 @@ void sve_hybrid_fp16_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 77b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbz %x[flags], #1, 83f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rh { z1.h }, p5/Z, [x19]\n" @@ -3151,9 +3125,8 @@ void sve_hybrid_fp16_mla_6x4VL ( "st1h { z31.h }, p1, [x21, #3, MUL VL]\n" "addvl x21, x21, #4\n" "84:" // Height 6: Writeback done - "mov x19, #0x0\n" - "inch x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "dech x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 73b\n" "subs %x[M], %x[M], #0x6\n" "beq 86f\n" @@ -3168,7 +3141,7 @@ void sve_hybrid_fp16_mla_6x4VL ( "b 1b\n" "86:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", 
"x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp index f0cc70b76e..3c369eb35a 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const float *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const float *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp index 3a6422abd1..db29ebc23c 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -251,7 +251,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 7b\n" - "prfm pstl1keep, [x13, #0x0]\n" "tbz %x[flags], #1, 13f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -272,9 +271,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z11.s }, p1, [x13, #3, MUL VL]\n" "addvl x13, x13, #4\n" "14:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 3b\n" "b 86f\n" "15:" // Height 2 @@ -484,8 +482,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 21b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbz %x[flags], #1, 27f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -519,9 +515,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z15.s }, p1, [x9, #3, MUL VL]\n" "addvl x9, x9, #4\n" "28:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 17b\n" "b 86f\n" "29:" // Height 3 @@ -787,9 +782,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 35b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "tbz %x[flags], #1, 41f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -836,9 +828,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z19.s }, p1, [x27, #3, MUL VL]\n" "addvl x27, x27, #4\n" "42:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 31b\n" "b 86f\n" "43:" // Height 4 @@ -1160,10 +1151,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" 
"bne 49b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbz %x[flags], #1, 55f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -1223,9 +1210,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z23.s }, p1, [x25, #3, MUL VL]\n" "addvl x25, x25, #4\n" "56:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 45b\n" "b 86f\n" "57:" // Height 5 @@ -1603,11 +1589,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 63b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbz %x[flags], #1, 69f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -1680,9 +1661,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z27.s }, p1, [x23, #3, MUL VL]\n" "addvl x23, x23, #4\n" "70:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" "bgt 59b\n" "b 86f\n" "71:" // Height 6 @@ -2118,12 +2098,6 @@ void sve_hybrid_fp32_mla_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 77b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbz %x[flags], #1, 83f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z1.s }, p5/Z, [x19]\n" @@ -2209,9 +2183,8 @@ void sve_hybrid_fp32_mla_6x4VL ( "st1w { z31.s }, p1, [x21, #3, MUL VL]\n" "addvl x21, x21, #4\n" "84:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x16, x16, x19\n" + "decw x16, ALL, MUL #4\n" + "cmp x16, XZR\n" 
"bgt 73b\n" "subs %x[M], %x[M], #0x6\n" "beq 86f\n" @@ -2226,7 +2199,7 @@ void sve_hybrid_fp32_mla_6x4VL ( "b 1b\n" "86:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp index 20d9922e93..5238a9ba12 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const float *, \ - IndirectOutputArg, \ - const float *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const float *, \ + IndirectOutputArg, \ + const float *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp index 361e303c7a..0e45b06765 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_fp32_mla_8x1VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -191,7 +191,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 7b\n" - "prfm pstl1keep, [x17, #0x0]\n" "tbz %x[flags], #1, 13f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -203,9 +202,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z24.s }, p1, [x17]\n" "addvl x17, x17, #1\n" "14:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 3b\n" "b 114f\n" "15:" // Height 2 @@ -319,8 +317,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 21b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" "tbz %x[flags], #1, 27f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -336,9 +332,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z25.s }, p1, [x13]\n" "addvl x13, x13, #1\n" "28:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + 
"decw x6\n" + "cmp x6, XZR\n" "bgt 17b\n" "b 114f\n" "29:" // Height 3 @@ -475,9 +470,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 35b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" "tbz %x[flags], #1, 41f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -497,9 +489,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z26.s }, p1, [x11]\n" "addvl x11, x11, #1\n" "42:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 31b\n" "b 114f\n" "43:" // Height 4 @@ -659,10 +650,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 49b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbz %x[flags], #1, 55f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -686,9 +673,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z27.s }, p1, [x9]\n" "addvl x9, x9, #1\n" "56:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 45b\n" "b 114f\n" "57:" // Height 5 @@ -871,11 +857,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 63b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "tbz %x[flags], #1, 69f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -903,9 +884,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z28.s }, p1, [x27]\n" "addvl x27, x27, #1\n" "70:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 59b\n" "b 114f\n" "71:" // Height 6 @@ 
-1111,12 +1091,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 77b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbz %x[flags], #1, 83f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -1148,9 +1122,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z29.s }, p1, [x25]\n" "addvl x25, x25, #1\n" "84:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 73b\n" "b 114f\n" "85:" // Height 7 @@ -1379,13 +1352,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 91b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbz %x[flags], #1, 97f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -1421,9 +1387,8 @@ void sve_hybrid_fp32_mla_8x1VL ( "st1w { z30.s }, p1, [x23]\n" "addvl x23, x23, #1\n" "98:" // Height 7: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 87b\n" "b 114f\n" "99:" // Height 8 @@ -1677,14 +1642,6 @@ void sve_hybrid_fp32_mla_8x1VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x16, x19\n" "bne 105b\n" - "prfm pstl1keep, [x17, #0x0]\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x11, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbz %x[flags], #1, 111f\n" "add x19, %x[args_ptr], %[offset_min]\n" "ld1rw { z17.s }, p2/Z, [x19]\n" @@ -1724,9 +1681,8 @@ void 
sve_hybrid_fp32_mla_8x1VL ( "st1w { z31.s }, p1, [x21]\n" "addvl x21, x21, #1\n" "112:" // Height 8: Writeback done - "mov x19, #0x0\n" - "incw x19\n" - "subs x6, x6, x19\n" + "decw x6\n" + "cmp x6, XZR\n" "bgt 101b\n" "subs %x[M], %x[M], #0x8\n" "beq 114f\n" @@ -1741,7 +1697,7 @@ void sve_hybrid_fp32_mla_8x1VL ( "b 1b\n" "114:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [bias] "r" (bias), [flags] "r" (flags), [offset_max] "I" (offsetof(KernelArgs, maxval)), [offset_min] "I" (offsetof(KernelArgs, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp index 0150ce8fd9..b4d3f0283d 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp index 2b1448bd65..3c778bfe94 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qa_dot_4x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -226,7 +226,6 @@ void sve_hybrid_s8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 5b\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbnz %x[flags], #31, 13f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z1.s }, p2/Z, [x19]\n" @@ -301,9 +300,8 @@ void sve_hybrid_s8qa_dot_4x4VL ( "st1b { z16.b }, p1, [x9]\n" "addvl x9, x9, #1\n" "15:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 3b\n" "b 62f\n" "16:" // Height 2 @@ -498,17 +496,13 @@ void sve_hybrid_s8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 20b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbnz %x[flags], #31, 28f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z2.s }, p2/Z, [x19]\n" "neg z2.s, p2/M, z2.s\n" - "mov x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "saddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + 
"saddv d11, p0, z11.s\n" "saddv d12, p0, z12.s\n" "mov z11.s, z11.s[0]\n" "mov z12.s, z12.s[0]\n" @@ -624,9 +618,8 @@ void sve_hybrid_s8qa_dot_4x4VL ( "st1b { z20.b }, p1, [x25]\n" "addvl x25, x25, #1\n" "30:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 18b\n" "b 62f\n" "31:" // Height 3 @@ -871,27 +864,20 @@ void sve_hybrid_s8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 35b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbnz %x[flags], #31, 43f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z3.s }, p2/Z, [x19]\n" "neg z3.s, p2/M, z3.s\n" - "mov x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "saddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + "saddv d11, p0, z11.s\n" "saddv d12, p0, z12.s\n" - "mov x19, #0x4\n" + "saddv d13, p0, z13.s\n" "mov z11.s, z11.s[0]\n" - "whilelt p0.s, XZR, x19\n" "mov z12.s, z12.s[0]\n" - "saddv d13, p0, z13.s\n" + "mov z13.s, z13.s[0]\n" "mul z11.s, p2/M, z11.s, z3.s\n" "mul z12.s, p2/M, z12.s, z3.s\n" - "mov z13.s, z13.s[0]\n" "mul z13.s, p2/M, z13.s, z3.s\n" "43:" // Height 3: skip row sum fixup "add z16.s, z16.s, z11.s\n" @@ -1048,9 +1034,8 @@ void sve_hybrid_s8qa_dot_4x4VL ( "st1b { z24.b }, p1, [x23]\n" "addvl x23, x23, #1\n" "45:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 33b\n" "b 62f\n" "46:" // Height 4 @@ -1347,33 +1332,23 @@ void sve_hybrid_s8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 50b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbnz %x[flags], #31, 58f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z4.s }, p2/Z, [x19]\n" "neg z4.s, p2/M, z4.s\n" - "mov 
x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "saddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + "saddv d11, p0, z11.s\n" "saddv d12, p0, z12.s\n" - "mov x19, #0x4\n" + "saddv d13, p0, z13.s\n" + "saddv d14, p0, z14.s\n" "mov z11.s, z11.s[0]\n" - "whilelt p0.s, XZR, x19\n" - "mov x19, #0x4\n" "mov z12.s, z12.s[0]\n" - "saddv d13, p0, z13.s\n" - "whilelt p0.s, XZR, x19\n" + "mov z13.s, z13.s[0]\n" + "mov z14.s, z14.s[0]\n" "mul z11.s, p2/M, z11.s, z4.s\n" - "saddv d14, p0, z14.s\n" "mul z12.s, p2/M, z12.s, z4.s\n" - "mov z13.s, z13.s[0]\n" "mul z13.s, p2/M, z13.s, z4.s\n" - "mov z14.s, z14.s[0]\n" "mul z14.s, p2/M, z14.s, z4.s\n" "58:" // Height 4: skip row sum fixup "add z16.s, z16.s, z11.s\n" @@ -1575,9 +1550,8 @@ void sve_hybrid_s8qa_dot_4x4VL ( "st1b { z28.b }, p1, [x21]\n" "addvl x21, x21, #1\n" "60:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 48b\n" "subs %x[M], %x[M], #0x4\n" "beq 62f\n" @@ -1592,7 +1566,7 @@ void sve_hybrid_s8qa_dot_4x4VL ( "b 1b\n" "62:" // Exit - : [M] "+r" (M), [flags] "+r" (flags), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, 
string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp index d8562898aa..b69b561cce 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp index 4a4af6356c..495637bcdd 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8qs_dot_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -226,7 +226,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 5b\n" - "prfm pstl1keep, [x13, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -306,9 +305,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z8.b }, p1, [x13]\n" "addvl x13, x13, #1\n" "14:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 3b\n" "b 86f\n" "15:" // Height 2 @@ -491,8 +489,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 19b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -613,9 +609,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z12.b }, p1, [x9]\n" "addvl x9, x9, #1\n" "28:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 17b\n" "b 86f\n" "29:" // Height 3 @@ -846,9 +841,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 33b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -1010,9 +1002,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z16.b }, p1, [x27]\n" "addvl x27, x27, #1\n" "42:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 31b\n" "b 86f\n" "43:" // Height 4 @@ -1291,10 +1282,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 47b\n" - "prfm pstl1keep, [x13, 
#0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -1497,9 +1484,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z20.b }, p1, [x25]\n" "addvl x25, x25, #1\n" "56:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 45b\n" "b 86f\n" "57:" // Height 5 @@ -1826,11 +1812,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 61b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -2074,9 +2055,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z24.b }, p1, [x23]\n" "addvl x23, x23, #1\n" "70:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 59b\n" "b 86f\n" "71:" // Height 6 @@ -2453,12 +2433,6 @@ void sve_hybrid_s8qs_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 75b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "ld1w { z0.s }, p2/Z, [x16]\n" "add z8.s, z8.s, z0.s\n" "ld1w { z1.s }, p2/Z, [x16, #1, MUL VL]\n" @@ -2743,9 +2717,8 @@ void sve_hybrid_s8qs_dot_6x4VL ( "st1b { z28.b }, p1, [x21]\n" "addvl x21, x21, #1\n" "84:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 73b\n" "subs %x[M], %x[M], #0x6\n" "beq 86f\n" @@ -2760,7 +2733,7 @@ void 
sve_hybrid_s8qs_dot_6x4VL ( "b 1b\n" "86:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [flags] "r" (flags), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_multiplier_ptr] "I" (offsetof(KernelArgs, multiplier_ptr)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_shift_ptr] "I" (offsetof(KernelArgs, shift_ptr)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "p0", "p1", "p2", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp index 1aebedb861..a6652fd1b2 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const int8_t *, \ - IndirectOutputArg, \ - const int32_t *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const int8_t *, \ + IndirectOutputArg, \ + const int32_t *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp index cae9bf329f..b2ebce80d2 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_s8s32_dot_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -228,16 +228,14 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 6b\n" - "prfm pstl1keep, [x13, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" "st1w { z11.s }, p1, [x13, #3, MUL VL]\n" "addvl x13, x13, #4\n" "12:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 3b\n" "b 74f\n" "13:" // Height 2 @@ -434,8 +432,6 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 18b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -447,9 +443,8 @@ void sve_hybrid_s8s32_dot_6x4VL ( "st1w { z15.s }, p1, [x9, #3, MUL VL]\n" "addvl x9, x9, #4\n" "24:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw 
x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 15b\n" "b 74f\n" "25:" // Height 3 @@ -698,9 +693,6 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 30b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -717,9 +709,8 @@ void sve_hybrid_s8s32_dot_6x4VL ( "st1w { z19.s }, p1, [x27, #3, MUL VL]\n" "addvl x27, x27, #4\n" "36:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 27b\n" "b 74f\n" "37:" // Height 4 @@ -1020,10 +1011,6 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 42b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1045,9 +1032,8 @@ void sve_hybrid_s8s32_dot_6x4VL ( "st1w { z23.s }, p1, [x25, #3, MUL VL]\n" "addvl x25, x25, #4\n" "48:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 39b\n" "b 74f\n" "49:" // Height 5 @@ -1400,11 +1386,6 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 54b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1431,9 +1412,8 @@ void sve_hybrid_s8s32_dot_6x4VL ( "st1w { z27.s }, p1, [x23, 
#3, MUL VL]\n" "addvl x23, x23, #4\n" "60:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 51b\n" "b 74f\n" "61:" // Height 6 @@ -1840,12 +1820,6 @@ void sve_hybrid_s8s32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 66b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1877,9 +1851,8 @@ void sve_hybrid_s8s32_dot_6x4VL ( "st1w { z31.s }, p1, [x21, #3, MUL VL]\n" "addvl x21, x21, #4\n" "72:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 63b\n" "subs %x[M], %x[M], #0x6\n" "beq 74f\n" @@ -1894,7 +1867,7 @@ void sve_hybrid_s8s32_dot_6x4VL ( "b 1b\n" "74:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", 
"z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp index 964f7cc2c1..bacf2351ac 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const uint8_t *, \ - IndirectOutputArg, \ - const Requantize32 *, const int32_t *, unsigned int + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const uint8_t *, \ + IndirectOutputArg, \ + const Requantize32 *, const int32_t *, unsigned int namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp index 0a6546b78a..52210dca27 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8qa_dot_4x4VL/generic.cpp @@ -226,7 +226,6 @@ void sve_hybrid_u8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 5b\n" - "prfm pstl1keep, [x9, #0x0]\n" "tbnz %x[flags], #31, 13f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z1.s }, p2/Z, [x19]\n" @@ -301,9 +300,8 @@ void sve_hybrid_u8qa_dot_4x4VL ( "st1b { z16.b }, p1, [x9]\n" "addvl x9, x9, #1\n" "15:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 3b\n" "b 62f\n" "16:" // Height 2 @@ -498,17 
+496,13 @@ void sve_hybrid_u8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 20b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "tbnz %x[flags], #31, 28f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z2.s }, p2/Z, [x19]\n" "neg z2.s, p2/M, z2.s\n" - "mov x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "uaddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + "uaddv d11, p0, z11.s\n" "uaddv d12, p0, z12.s\n" "mov z11.s, z11.s[0]\n" "mov z12.s, z12.s[0]\n" @@ -624,9 +618,8 @@ void sve_hybrid_u8qa_dot_4x4VL ( "st1b { z20.b }, p1, [x25]\n" "addvl x25, x25, #1\n" "30:" // Height 2: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 18b\n" "b 62f\n" "31:" // Height 3 @@ -871,27 +864,20 @@ void sve_hybrid_u8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 35b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "tbnz %x[flags], #31, 43f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z3.s }, p2/Z, [x19]\n" "neg z3.s, p2/M, z3.s\n" - "mov x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "uaddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + "uaddv d11, p0, z11.s\n" "uaddv d12, p0, z12.s\n" - "mov x19, #0x4\n" + "uaddv d13, p0, z13.s\n" "mov z11.s, z11.s[0]\n" - "whilelt p0.s, XZR, x19\n" "mov z12.s, z12.s[0]\n" - "uaddv d13, p0, z13.s\n" + "mov z13.s, z13.s[0]\n" "mul z11.s, p2/M, z11.s, z3.s\n" "mul z12.s, p2/M, z12.s, z3.s\n" - "mov z13.s, z13.s[0]\n" "mul z13.s, p2/M, z13.s, z3.s\n" "43:" // Height 3: skip row sum fixup "add z16.s, z16.s, z11.s\n" @@ -1048,9 +1034,8 @@ void sve_hybrid_u8qa_dot_4x4VL ( "st1b { z24.b }, p1, [x23]\n" "addvl x23, x23, #1\n" "45:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 
33b\n" "b 62f\n" "46:" // Height 4 @@ -1347,33 +1332,23 @@ void sve_hybrid_u8qa_dot_4x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x28, x19\n" "bne 50b\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "tbnz %x[flags], #31, 58f\n" "add x19, %x[qp], %[b_offset]\n" "ld1rw { z4.s }, p2/Z, [x19]\n" "neg z4.s, p2/M, z4.s\n" - "mov x20, #0x4\n" "mov x19, #0x4\n" - "whilelt p0.s, XZR, x20\n" - "uaddv d11, p0, z11.s\n" "whilelt p0.s, XZR, x19\n" + "uaddv d11, p0, z11.s\n" "uaddv d12, p0, z12.s\n" - "mov x19, #0x4\n" + "uaddv d13, p0, z13.s\n" + "uaddv d14, p0, z14.s\n" "mov z11.s, z11.s[0]\n" - "whilelt p0.s, XZR, x19\n" - "mov x19, #0x4\n" "mov z12.s, z12.s[0]\n" - "uaddv d13, p0, z13.s\n" - "whilelt p0.s, XZR, x19\n" + "mov z13.s, z13.s[0]\n" + "mov z14.s, z14.s[0]\n" "mul z11.s, p2/M, z11.s, z4.s\n" - "uaddv d14, p0, z14.s\n" "mul z12.s, p2/M, z12.s, z4.s\n" - "mov z13.s, z13.s[0]\n" "mul z13.s, p2/M, z13.s, z4.s\n" - "mov z14.s, z14.s[0]\n" "mul z14.s, p2/M, z14.s, z4.s\n" "58:" // Height 4: skip row sum fixup "add z16.s, z16.s, z11.s\n" @@ -1575,9 +1550,8 @@ void sve_hybrid_u8qa_dot_4x4VL ( "st1b { z28.b }, p1, [x21]\n" "addvl x21, x21, #1\n" "60:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x12, x12, x19\n" + "decw x12, ALL, MUL #4\n" + "cmp x12, XZR\n" "bgt 48b\n" "subs %x[M], %x[M], #0x4\n" "beq 62f\n" @@ -1592,7 +1566,7 @@ void sve_hybrid_u8qa_dot_4x4VL ( "b 1b\n" "62:" // Exit - : [M] "+r" (M), [flags] "+r" (flags), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [flags] "+&r" (flags), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [b_offset] "I" (offsetof(Requantize32, b_offset)), [c_offset] "I" (offsetof(Requantize32, c_offset)), [col_bias] "r" (col_bias), [maxval] "I" (offsetof(Requantize32, maxval)), [minval] "I" (offsetof(Requantize32, 
minval)), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)), [per_layer_mul] "I" (offsetof(Requantize32, per_layer_mul)), [per_layer_right_shift] "I" (offsetof(Requantize32, per_layer_right_shift)), [qp] "r" (qp) : "cc", "memory", "p0", "p1", "p2", "x9", "x10", "x11", "x12", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp index af9de4a6eb..8433fa605e 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. 
* * SPDX-License-Identifier: MIT * @@ -27,12 +27,12 @@ #include "../std_transforms_sve.hpp" #define ARGLIST \ - unsigned int, const unsigned int *, \ - IndirectInputArg, \ - size_t, size_t, \ - const uint8_t *, \ - IndirectOutputArg, \ - const uint32_t *, Activation, bool + unsigned int, const unsigned int *, \ + IndirectInputArg, \ + size_t, size_t, \ + const uint8_t *, \ + IndirectOutputArg, \ + const uint32_t *, Activation, bool namespace arm_gemm { diff --git a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp index fc8ce636dd..6ee636d7f6 100644 --- a/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp +++ b/src/core/NEON/kernels/arm_gemm/kernels/sve_hybrid_u8u32_dot_6x4VL/generic.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2020 Arm Limited. + * Copyright (c) 2019-2021 Arm Limited. * * SPDX-License-Identifier: MIT * @@ -228,16 +228,14 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 6b\n" - "prfm pstl1keep, [x13, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" "st1w { z11.s }, p1, [x13, #3, MUL VL]\n" "addvl x13, x13, #4\n" "12:" // Height 1: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 3b\n" "b 74f\n" "13:" // Height 2 @@ -434,8 +432,6 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 18b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -447,9 +443,8 @@ void sve_hybrid_u8u32_dot_6x4VL ( "st1w { z15.s }, p1, [x9, #3, MUL VL]\n" "addvl x9, x9, #4\n" "24:" // Height 2: Writeback done - "mov x19, #0x0\n" - 
"incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 15b\n" "b 74f\n" "25:" // Height 3 @@ -698,9 +693,6 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 30b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -717,9 +709,8 @@ void sve_hybrid_u8u32_dot_6x4VL ( "st1w { z19.s }, p1, [x27, #3, MUL VL]\n" "addvl x27, x27, #4\n" "36:" // Height 3: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 27b\n" "b 74f\n" "37:" // Height 4 @@ -1020,10 +1011,6 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 42b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1045,9 +1032,8 @@ void sve_hybrid_u8u32_dot_6x4VL ( "st1w { z23.s }, p1, [x25, #3, MUL VL]\n" "addvl x25, x25, #4\n" "48:" // Height 4: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 39b\n" "b 74f\n" "49:" // Height 5 @@ -1400,11 +1386,6 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 54b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1431,9 +1412,8 @@ void sve_hybrid_u8u32_dot_6x4VL ( "st1w { z27.s }, p1, 
[x23, #3, MUL VL]\n" "addvl x23, x23, #4\n" "60:" // Height 5: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 51b\n" "b 74f\n" "61:" // Height 6 @@ -1840,12 +1820,6 @@ void sve_hybrid_u8u32_dot_6x4VL ( "ldr w19, [%x[args_ptr], %[offsetof_num_strings]]\n" "cmp x12, x19\n" "bne 66b\n" - "prfm pstl1keep, [x13, #0x0]\n" - "prfm pstl1keep, [x9, #0x0]\n" - "prfm pstl1keep, [x27, #0x0]\n" - "prfm pstl1keep, [x25, #0x0]\n" - "prfm pstl1keep, [x23, #0x0]\n" - "prfm pstl1keep, [x21, #0x0]\n" "st1w { z8.s }, p4, [x13]\n" "st1w { z9.s }, p3, [x13, #1, MUL VL]\n" "st1w { z10.s }, p2, [x13, #2, MUL VL]\n" @@ -1877,9 +1851,8 @@ void sve_hybrid_u8u32_dot_6x4VL ( "st1w { z31.s }, p1, [x21, #3, MUL VL]\n" "addvl x21, x21, #4\n" "72:" // Height 6: Writeback done - "mov x19, #0x0\n" - "incw x19, ALL, MUL #4\n" - "subs x15, x15, x19\n" + "decw x15, ALL, MUL #4\n" + "cmp x15, XZR\n" "bgt 63b\n" "subs %x[M], %x[M], #0x6\n" "beq 74f\n" @@ -1894,7 +1867,7 @@ void sve_hybrid_u8u32_dot_6x4VL ( "b 1b\n" "74:" // Exit - : [M] "+r" (M), [input_ptr] "+r" (input_ptr), [output_ptr] "+r" (output_ptr) + : [M] "+&r" (M), [input_ptr] "+&r" (input_ptr), [output_ptr] "+&r" (output_ptr) : [args_ptr] "r" (&ka), [flags] "r" (flags), [offsetof_B_ptr] "I" (offsetof(KernelArgs, B_ptr)), [offsetof_N] "I" (offsetof(KernelArgs, N)), [offsetof_input_initial_col] "I" (offsetof(KernelArgs, input_initial_col)), [offsetof_input_offset] "I" (offsetof(KernelArgs, input_offset)), [offsetof_num_strings] "I" (offsetof(KernelArgs, num_strings)), [offsetof_output_offset] "I" (offsetof(KernelArgs, output_offset)), [offsetof_string_lengths] "I" (offsetof(KernelArgs, string_lengths)) : "cc", "memory", "p0", "p1", "p2", "p3", "p4", "p5", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", 
"z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31" ); -- cgit v1.2.1